Another potentially interesting question we can try to answer is how much face representation we see across the task. In order to do so, we’ve trained a linear SVM classifier within subjects on the data from the smoothed FFA localizer to classify signal into faces, objects and scrambles. We can then apply that classifier to various facets of our data. For each of these analyses, we will look at the probability of the classifier predicting a face. If the classifier does indeed predict a face, we score that TR with a “1”, otherwise, it gets a “0”, meaning chance becomes 1/3 = .33.
First, we will apply it to each TR of individual trials. Trials are split into 4 bins based on accuracy and load, and averaged over those measures to create a single time course for each category. The classifier was also applied to each TR of a “template” for each condition. In this analysis, all trials in a given condition were averaged to create a prototypical example for the category. The classifier was then applied to those averages.
We can then look at the probability of classification across subjects. First, we look at it across all subjects, but then we can look at it across our working memory capacity groups.
Finally, we will relate these neural measures to behavior, both averaged over time and for each TR.
library(reshape2)
library(tidyverse)
## ── Attaching packages ─────────────────────────────────────── tidyverse 1.3.0 ──
## ✓ ggplot2 3.3.2 ✓ purrr 0.3.4
## ✓ tibble 3.0.3 ✓ dplyr 1.0.1
## ✓ tidyr 1.1.1 ✓ stringr 1.4.0
## ✓ readr 1.3.1 ✓ forcats 0.5.0
## ── Conflicts ────────────────────────────────────────── tidyverse_conflicts() ──
## x dplyr::filter() masks stats::filter()
## x dplyr::lag() masks stats::lag()
library(patchwork)
# Behavioral data and subject-grouping info.
# NOTE(review): these .RData files presumably define constructs_fMRI, p200_data,
# p200_clinical_zscores, and WM_groups used later in this document -- confirm.
load('data/behav.RData')
load('data/split_groups_info.RData')
load('data/DFR_split_groups_info.RData')
# Helper functions: splitting subjects into WM-capacity groups, reshaping the
# trial-level data for plotting, and splitting measures by trial type.
source("helper_fxns/split_into_groups.R")
source('helper_fxns/prep_trial_levels_for_plot.R')
source("helper_fxns/split_trial_type.R")
# Standard error of the mean, ignoring missing values.
#
# Args:
#   x: numeric vector (may contain NA).
# Returns:
#   sd(x, na.rm = TRUE) / sqrt(n), where n is the count of non-missing values.
#   Returns NA when x has no non-missing values.
se <- function(x) {
  # sum(!is.na(x)) counts non-missing values without allocating a subset
  # vector the way length(x[!is.na(x)]) does.
  sd(x, na.rm = TRUE) / sqrt(sum(!is.na(x)))
}
# Classifier information: per-subject cross-validation accuracy and the best
# SVM cost (C) value selected for each subject.
clf_acc <- read.csv('data/MVPA/HPC_csvs/clf_acc.csv')
best_c <- read.csv('data/MVPA/HPC_csvs/best_C.csv')
# Face-probability time courses from condition "templates" (all trials in a
# condition averaged before classification); one file per load x accuracy cell.
averages_from_template <- list(high_correct = read.csv('data/MVPA/HPC_csvs/all_suj_high_correct_avg.csv',header=FALSE),
high_incorrect = read.csv('data/MVPA/HPC_csvs/all_suj_high_incorrect_avg.csv',header=FALSE),
low_correct = read.csv('data/MVPA/HPC_csvs/all_suj_low_correct_avg.csv',header=FALSE),
low_incorrect = read.csv('data/MVPA/HPC_csvs/all_suj_low_incorrect_avg.csv',header=FALSE))
# Correct-minus-incorrect difference time courses (first 14 TR columns) at
# each load.
averages_from_template[["high_load_correct_diff"]] <- averages_from_template[["high_correct"]][,1:14] - averages_from_template[["high_incorrect"]][,1:14]
averages_from_template[["low_load_correct_diff"]] <- averages_from_template[["low_correct"]][,1:14] - averages_from_template[["low_incorrect"]][,1:14]
# Face probabilities from classifying each TR of individual trials, averaged
# within subject and condition ("probs" files).
individual_trial_averages_probs <- list(
high_correct = read.csv('data/MVPA/HPC_csvs/all_suj_high_correct_indiv_trial_avg_probs.csv',header=FALSE),
high_incorrect = read.csv('data/MVPA/HPC_csvs/all_suj_high_incorrect_indiv_trial_avg_probs.csv',header=FALSE),
low_correct = read.csv('data/MVPA/HPC_csvs/all_suj_low_correct_indiv_trial_avg_probs.csv',header=FALSE),
low_incorrect = read.csv('data/MVPA/HPC_csvs/all_suj_low_incorrect_indiv_trial_avg_probs.csv',header=FALSE))
individual_trial_averages_probs[["high_load_correct_diff"]] <- individual_trial_averages_probs[["high_correct"]][,1:14] - individual_trial_averages_probs[["high_incorrect"]][,1:14]
individual_trial_averages_probs[["low_load_correct_diff"]] <- individual_trial_averages_probs[["low_correct"]][,1:14] - individual_trial_averages_probs[["low_incorrect"]][,1:14]
# Hard face/no-face votes per TR (1 = face predicted, 0 = not; see the text
# above), averaged within subject and condition ("preds" files).
individual_trial_averages_preds <- list(
high_correct = read.csv('data/MVPA/HPC_csvs/all_suj_high_correct_indiv_trial_avg_preds.csv',header=FALSE),
high_incorrect = read.csv('data/MVPA/HPC_csvs/all_suj_high_incorrect_indiv_trial_avg_preds.csv',header=FALSE),
low_correct = read.csv('data/MVPA/HPC_csvs/all_suj_low_correct_indiv_trial_avg_preds.csv',header=FALSE),
low_incorrect = read.csv('data/MVPA/HPC_csvs/all_suj_low_incorrect_indiv_trial_avg_preds.csv',header=FALSE))
individual_trial_averages_preds[["high_load_correct_diff"]] <- individual_trial_averages_preds[["high_correct"]][,1:14] - individual_trial_averages_preds[["high_incorrect"]][,1:14]
individual_trial_averages_preds[["low_load_correct_diff"]] <- individual_trial_averages_preds[["low_correct"]][,1:14] - individual_trial_averages_preds[["low_incorrect"]][,1:14]
# Clean the classifier outputs: drop one subject's row, convert NaN to NA in
# the 14 TR columns, and attach subject IDs (PTID). Results are cached to
# data/MVPA_HPC.RData for downstream use.
averages_from_template2 <- list()
indiv_probs <- list()
indiv_preds <- list()
# Rows c(1:9, 11:170): row 10 is excluded -- presumably a dropped subject;
# TODO confirm against the subject list.
for (i in seq.int(1,6)){
averages_from_template2[[names(averages_from_template)[i]]] <- averages_from_template[[i]][c(1:9,11:170),]
indiv_preds[[names(averages_from_template)[i]]] <- individual_trial_averages_preds[[i]][c(1:9,11:170),]
indiv_probs[[names(averages_from_template)[i]]] <- individual_trial_averages_probs[[i]][c(1:9,11:170),]
# Replace NaN with NA in each TR column (1..14).
for (ii in seq.int(1,14)){
averages_from_template2[[i]][is.nan(averages_from_template2[[i]][,ii]),ii] <- NA
indiv_probs[[i]][is.nan(indiv_probs[[i]][,ii]),ii] <- NA
indiv_preds[[i]][is.nan(indiv_preds[[i]][,ii]),ii] <- NA
}
# constructs_fMRI is loaded from one of the RData files above. Rows are
# assumed to align positionally with the CSV rows -- TODO confirm.
averages_from_template2[[i]]$PTID <- constructs_fMRI$PTID[c(1:9,11:170)]
indiv_probs[[i]]$PTID <- constructs_fMRI$PTID[c(1:9,11:170)]
indiv_preds[[i]]$PTID <- constructs_fMRI$PTID[c(1:9,11:170)]
}
# Swap the cleaned versions in under the original names; drop the temporaries.
averages_from_template <- averages_from_template2
individual_trial_averages_preds <- indiv_preds
individual_trial_averages_probs <- indiv_probs
rm(averages_from_template2)
rm(indiv_preds)
rm(indiv_probs)
# Cache everything for later notebooks/analyses.
save(list=c("clf_acc", "best_c", "averages_from_template", "individual_trial_averages_probs","individual_trial_averages_preds"), file = "data/MVPA_HPC.RData")
On average, we were able to classify faces with 48.7% accuracy (statistically significantly different from chance = 0.33). The classifier was trained on data from an independent FFA localizer. Data was extracted from the bilateral hippocampus. From that mask, the top 100 voxels based on the faces vs objects contrast in the overall subject GLM were selected as features for the classifier. The data used to train the classifier were shifted by 4.5 seconds to account for the hemodynamic delay.
A linear SVM classifier was used; the localizer task was split into 6 blocks of stimuli. These blocks were used in a pre-defined split for cross validation, where one block of each stimulus type was held out as a test set. Data were normalized within the training and test sets separately. Within this training set, another cross validation process was repeated to tune the cost of the model over the values [0.01, 0.1, 1, 10]. The best value of the cost function was used for each cross validation to score the classifier on the test set. The best classifier was also used to predict face presence at each TR during the DFR task.
# Average each subject's cross-validation accuracy across folds (columns).
# NOTE(review): assumes every column of clf_acc is a numeric fold accuracy;
# confirm the CSV has no ID column.
clf_acc$average <- rowMeans(clf_acc, na.rm = TRUE)
# One-sample t-test of mean accuracy against chance (1/3 for three classes).
t.test(clf_acc$average,mu=0.33)
##
## One Sample t-test
##
## data: clf_acc$average
## t = 19.197, df = 168, p-value < 2.2e-16
## alternative hypothesis: true mean is not equal to 0.33
## 95 percent confidence interval:
## 0.4712995 0.5036936
## sample estimates:
## mean of x
## 0.4874965
# Reshape each list of subject-by-TR data frames into long format for ggplot
# (prep_trial_levels_for_plot is sourced from helper_fxns above).
template_preds_melt <- prep_trial_levels_for_plot(averages_from_template)
## Using level as id variables
individual_trial_probs_melt <- prep_trial_levels_for_plot(individual_trial_averages_probs)
## Using level as id variables
individual_trial_preds_melt <- prep_trial_levels_for_plot(individual_trial_averages_preds)
## Using level as id variables
The shape of the time course is different here than it was for the fusiform region - here, we’re well below chance for encoding, but start to see a significant probability during delay (starting around TR 8) and the probe.
Here, we’re seeing a similar pattern to the fusiform, where we see peaks of decoding accuracy around the encoding period and then probe period. However, unlike the fusiform, we’re also seeing above chance accuracy for all trial types during the delay period. We also see that during encoding, high load trials (regardless of accuracy) show a higher probability of having a face decoded than low load trials. There are no differences between trial types, however, during probe.
# Face-probability time course for each of the four trial types (load x
# accuracy), with SE ribbons and a dotted chance line at 1/3.
ggplot(data=individual_trial_probs_melt %>% filter(level %in% c("high_correct", "high_incorrect", "low_correct", "low_incorrect")),aes(x=TR,y=value))+
geom_line(aes(color=level))+
geom_line(aes(x=TR,y=0.33),color="black",linetype="dotted")+
geom_ribbon(aes(x=TR,ymin=se_min,ymax=se_max,fill=level),alpha=0.2)+
scale_x_continuous(breaks = c(1:14),labels=c(1:14))+
ylab("Probability of classifier predicting a face")+
theme_classic()
# Delay-period tests (V8 = TR 8): one-sample t-tests of face probability
# against chance (1/3) for each trial type.
t.test(individual_trial_averages_probs[["high_correct"]]$V8,mu=0.33)
##
## One Sample t-test
##
## data: individual_trial_averages_probs[["high_correct"]]$V8
## t = 4.5119, df = 168, p-value = 1.203e-05
## alternative hypothesis: true mean is not equal to 0.33
## 95 percent confidence interval:
## 0.3481088 0.3762841
## sample estimates:
## mean of x
## 0.3621964
t.test(individual_trial_averages_probs[["high_incorrect"]]$V8,mu=0.33)
##
## One Sample t-test
##
## data: individual_trial_averages_probs[["high_incorrect"]]$V8
## t = 3.4558, df = 168, p-value = 0.0006947
## alternative hypothesis: true mean is not equal to 0.33
## 95 percent confidence interval:
## 0.3474146 0.3938220
## sample estimates:
## mean of x
## 0.3706183
t.test(individual_trial_averages_probs[["low_correct"]]$V8,mu=0.33)
##
## One Sample t-test
##
## data: individual_trial_averages_probs[["low_correct"]]$V8
## t = 4.2318, df = 168, p-value = 3.804e-05
## alternative hypothesis: true mean is not equal to 0.33
## 95 percent confidence interval:
## 0.3451973 0.3717755
## sample estimates:
## mean of x
## 0.3584864
# Note the smaller df here (111 vs 168): low-incorrect data are missing for
# some subjects.
t.test(individual_trial_averages_probs[["low_incorrect"]]$V8,mu=0.33)
##
## One Sample t-test
##
## data: individual_trial_averages_probs[["low_incorrect"]]$V8
## t = 2.3665, df = 111, p-value = 0.01969
## alternative hypothesis: true mean is not equal to 0.33
## 95 percent confidence interval:
## 0.3404141 0.4476377
## sample estimates:
## mean of x
## 0.3940259
# Encoding-period load effect (V6 = TR 6): average over accuracy within each
# load, then paired t-test of high vs low load.
encoding_level_avg <- data.frame(high = rowMeans(cbind(individual_trial_averages_probs[["high_correct"]]$V6, individual_trial_averages_probs[["high_incorrect"]]$V6), na.rm=TRUE), low = rowMeans(cbind(individual_trial_averages_probs[["low_correct"]]$V6, individual_trial_averages_probs[["low_incorrect"]]$V6),na.rm=TRUE))
t.test(encoding_level_avg$high,encoding_level_avg$low,paired=TRUE)
##
## Paired t-test
##
## data: encoding_level_avg$high and encoding_level_avg$low
## t = 2.5939, df = 168, p-value = 0.01033
## alternative hypothesis: true difference in means is not equal to 0
## 95 percent confidence interval:
## 0.007115631 0.052448866
## sample estimates:
## mean of the differences
## 0.02978225
# Encoding-period accuracy effect: average over load within each accuracy
# level, then paired t-test of correct vs incorrect.
encoding_acc_avg <- data.frame(correct = rowMeans(cbind(individual_trial_averages_probs[["high_correct"]]$V6, individual_trial_averages_probs[["low_correct"]]$V6), na.rm=TRUE), incorrect = rowMeans(cbind(individual_trial_averages_probs[["low_incorrect"]]$V6, individual_trial_averages_probs[["high_incorrect"]]$V6),na.rm=TRUE))
t.test(encoding_acc_avg$correct,encoding_acc_avg$incorrect,paired=TRUE)
##
## Paired t-test
##
## data: encoding_acc_avg$correct and encoding_acc_avg$incorrect
## t = -1.3706, df = 168, p-value = 0.1723
## alternative hypothesis: true difference in means is not equal to 0
## 95 percent confidence interval:
## -0.044782554 0.008080779
## sample estimates:
## mean of the differences
## -0.01835089
# Probe-period (V11 = TR 11) one-way ANOVA across trial types.
# low_incorrect is omitted here -- presumably because of its missing
# subjects; TODO confirm.
probe_data_indiv <- data.frame(high_correct=individual_trial_averages_probs[["high_correct"]]$V11, high_incorrect = individual_trial_averages_probs[["high_incorrect"]]$V11, low_correct = individual_trial_averages_probs[["low_correct"]]$V11)
probe_data_indiv <- melt(probe_data_indiv)
## No id variables; using all as measure variables
probe.aov <- aov(value ~ variable, data = probe_data_indiv)
summary(probe.aov)
## Df Sum Sq Mean Sq F value Pr(>F)
## variable 2 0.074 0.03676 2.763 0.0641 .
## Residuals 504 6.706 0.01331
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
# Pairwise follow-ups with family-wise error correction.
TukeyHSD(probe.aov)
## Tukey multiple comparisons of means
## 95% family-wise confidence level
##
## Fit: aov(formula = value ~ variable, data = probe_data_indiv)
##
## $variable
## diff lwr upr p adj
## high_incorrect-high_correct 0.021262722 -0.008234506 0.05075995 0.2082925
## low_correct-high_correct -0.007074556 -0.036571784 0.02242267 0.8394081
## low_correct-high_incorrect -0.028337278 -0.057834506 0.00115995 0.0627787
It seems like there’s really not much difference between correct and incorrect trials in the hippocampus.
# Correct-minus-incorrect difference time courses at each load (individual
# trials), with a dotted zero line for reference.
ggplot(data=individual_trial_probs_melt %>% filter(level %in% c("low_load_correct_diff","high_load_correct_diff")),aes(x=TR,y=value))+
geom_line(aes(x=TR,y=0), linetype="dotted")+
geom_line(aes(color=level))+
geom_ribbon(aes(x=TR,ymin=se_min,ymax=se_max,fill=level),alpha=0.2)+
scale_x_continuous(breaks = c(1:14),labels=c(1:14))+
ylab("Correct - Incorrect Diff in Probability of Classifying Faces")+
theme_classic()
In the templates, we see a similar structure as in the individual trials with peaks around encoding and probe, though there is below chance decoding during delay period. The only difference between trial types comes through at TR 10 (early in probe), where we see a higher probability of predicting a face for correct vs incorrect trials, though by TR 11, this difference has disappeared.
# Template-based time courses for the four trial types (chance line at 1/3).
ggplot(data=template_preds_melt%>% filter(level %in% c("high_correct", "high_incorrect", "low_correct", "low_incorrect")),aes(x=TR,y=value))+
geom_line(aes(color=level))+
geom_line(aes(x=TR,y=0.33),color="black",linetype="dotted")+
geom_ribbon(aes(x=TR,ymin=se_min,ymax=se_max,fill=level),alpha=0.2)+
scale_x_continuous(breaks = c(1:14),labels=c(1:14))+
ylab("Probability of classifier predicting a face")+
theme_classic()
# Early-probe (V10 = TR 10) accuracy effect in the templates.
# NOTE(review): "incorrect" uses only high_incorrect -- presumably because
# low_incorrect template data are largely missing; confirm.
acc_data_probe <- data.frame(correct = rowMeans(cbind(averages_from_template[["high_correct"]]$V10,averages_from_template[["low_correct"]]$V10)), incorrect = averages_from_template[["high_incorrect"]]$V10)
t.test(acc_data_probe$correct,acc_data_probe$incorrect, paired=TRUE)
##
## Paired t-test
##
## data: acc_data_probe$correct and acc_data_probe$incorrect
## t = 2.4559, df = 168, p-value = 0.01507
## alternative hypothesis: true difference in means is not equal to 0
## 95 percent confidence interval:
## 0.01479709 0.13609107
## sample estimates:
## mean of the differences
## 0.07544408
# Same comparison one TR later (V11 = TR 11): the effect is gone.
acc_data_late_probe <- data.frame(correct = rowMeans(cbind(averages_from_template[["high_correct"]]$V11,averages_from_template[["low_correct"]]$V11)), incorrect = averages_from_template[["high_incorrect"]]$V11)
t.test(acc_data_late_probe$correct,acc_data_late_probe$incorrect, paired=TRUE)
##
## Paired t-test
##
## data: acc_data_late_probe$correct and acc_data_late_probe$incorrect
## t = -0.19689, df = 168, p-value = 0.8442
## alternative hypothesis: true difference in means is not equal to 0
## 95 percent confidence interval:
## -0.07611210 0.06230737
## sample estimates:
## mean of the differences
## -0.006902367
Unlike in the other regions, there is no difference in the overall probability of predicting a face from the template vs individual trials.
# Compare template vs individual-trial probabilities at TR 10 (V10), averaging
# over the three trial types used above.
compare_across_temp_indiv <- data.frame(template = rowMeans(cbind(averages_from_template[["high_correct"]]$V10,
averages_from_template[["high_incorrect"]]$V10,
averages_from_template[["low_correct"]]$V10)),
indiv = rowMeans(cbind(individual_trial_averages_probs[["high_correct"]]$V10,
individual_trial_averages_probs[["high_incorrect"]]$V10,
individual_trial_averages_probs[["low_correct"]]$V10)))
# Non-parametric paired comparison.
wilcox.test(compare_across_temp_indiv$template, compare_across_temp_indiv$indiv,paired=TRUE)
##
## Wilcoxon signed rank test with continuity correction
##
## data: compare_across_temp_indiv$template and compare_across_temp_indiv$indiv
## V = 7508, p-value = 0.6099
## alternative hypothesis: true location shift is not equal to 0
# Correct-minus-incorrect template difference time courses at each load.
ggplot(data=template_preds_melt %>% filter(level %in% c("low_load_correct_diff","high_load_correct_diff")),aes(x=TR,y=value))+
geom_line(aes(color=level))+
geom_ribbon(aes(x=TR,ymin=se_min,ymax=se_max,fill=level),alpha=0.2)+
scale_x_continuous(breaks = c(1:14),labels=c(1:14))+
ylab("Correct - Incorrect Diff in Probability of Classifying Faces")+
theme_classic()
# Split each measure by working-memory capacity group. WM_groups is
# presumably loaded from one of the RData files above -- TODO confirm.
split_template <- split_trial_type(averages_from_template,WM_groups)
split_indiv_probs <- split_trial_type(individual_trial_averages_probs, WM_groups)
split_indiv_preds <- split_trial_type(individual_trial_averages_preds, WM_groups)
In the correct trials (regardless of load), we see that medium capacity subjects have a higher likelihood of predicting a face than high or low capacity subjects during encoding.
# Build one probability-time-course panel per trial type (high/low load x
# correct/incorrect), each showing capacity-group means with SE ribbons and
# the dotted chance line at 1/3.
indiv_avgs <- lapply(seq_len(4), function(panel) {
  ggplot(data = split_indiv_probs[["avgs"]][[panel]][["all"]]) +
    geom_line(aes(x = TR, y = mean, color = group)) +
    geom_ribbon(aes(x = TR, ymin = se_min, ymax = se_max, fill = group), alpha = 0.2) +
    geom_line(aes(x = TR, y = 0.33), color = "black", linetype = "dotted") +
    scale_x_continuous(breaks = c(1:14), labels = c(1:14)) +
    ggtitle(names(split_indiv_probs[["avgs"]])[panel]) +
    ylab("Probability") +
    theme_classic()
})
# Arrange the four panels in a 2x2 grid with a single shared legend.
(indiv_avgs[[1]] + indiv_avgs[[2]]) / (indiv_avgs[[3]] + indiv_avgs[[4]])+
plot_layout(guides = "collect")+
plot_annotation(title="Probability of classifier predicting a face from individual trials")
print("encoding")
## [1] "encoding"
for (trial_type in seq.int(1,4)){
print(names(split_indiv_probs[["all_data"]])[trial_type])
temp.aov <- aov(split_indiv_probs[["all_data"]][[trial_type]][["all"]][,6] ~ split_indiv_probs[["all_data"]][[trial_type]][["all"]][,16])
print(summary(temp.aov))
print(TukeyHSD(temp.aov))
}
## [1] "high_correct"
## Df Sum Sq Mean Sq
## split_indiv_probs[["all_data"]][[trial_type]][["all"]][, 16] 2 0.0711 0.03557
## Residuals 164 1.7929 0.01093
## F value Pr(>F)
## split_indiv_probs[["all_data"]][[trial_type]][["all"]][, 16] 3.254 0.0411 *
## Residuals
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## Tukey multiple comparisons of means
## 95% family-wise confidence level
##
## Fit: aov(formula = split_indiv_probs[["all_data"]][[trial_type]][["all"]][, 6] ~ split_indiv_probs[["all_data"]][[trial_type]][["all"]][, 16])
##
## $`split_indiv_probs[["all_data"]][[trial_type]][["all"]][, 16]`
## diff lwr upr p adj
## med-high 0.044419643 -0.002315023 0.091154309 0.0663020
## low-high 0.001449545 -0.045497070 0.048396161 0.9970641
## low-med -0.042970097 -0.089916713 0.003976519 0.0803744
##
## [1] "high_incorrect"
## Df Sum Sq Mean Sq
## split_indiv_probs[["all_data"]][[trial_type]][["all"]][, 16] 2 0.123 0.06126
## Residuals 164 4.293 0.02617
## F value Pr(>F)
## split_indiv_probs[["all_data"]][[trial_type]][["all"]][, 16] 2.34 0.0995 .
## Residuals
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## Tukey multiple comparisons of means
## 95% family-wise confidence level
##
## Fit: aov(formula = split_indiv_probs[["all_data"]][[trial_type]][["all"]][, 6] ~ split_indiv_probs[["all_data"]][[trial_type]][["all"]][, 16])
##
## $`split_indiv_probs[["all_data"]][[trial_type]][["all"]][, 16]`
## diff lwr upr p adj
## med-high 0.02228571 -0.05002906 0.09460049 0.7467205
## low-high -0.04312003 -0.11576277 0.02952270 0.3412208
## low-med -0.06540575 -0.13804848 0.00723699 0.0870402
##
## [1] "low_correct"
## Df Sum Sq
## split_indiv_probs[["all_data"]][[trial_type]][["all"]][, 16] 2 0.0494
## Residuals 164 1.1535
## Mean Sq F value
## split_indiv_probs[["all_data"]][[trial_type]][["all"]][, 16] 0.024677 3.509
## Residuals 0.007033
## Pr(>F)
## split_indiv_probs[["all_data"]][[trial_type]][["all"]][, 16] 0.0322 *
## Residuals
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## Tukey multiple comparisons of means
## 95% family-wise confidence level
##
## Fit: aov(formula = split_indiv_probs[["all_data"]][[trial_type]][["all"]][, 6] ~ split_indiv_probs[["all_data"]][[trial_type]][["all"]][, 16])
##
## $`split_indiv_probs[["all_data"]][[trial_type]][["all"]][, 16]`
## diff lwr upr p adj
## med-high 0.009401786 -0.02808413 0.046887699 0.8239568
## low-high -0.030948896 -0.06860481 0.006707022 0.1297575
## low-med -0.040350682 -0.07800660 -0.002694763 0.0325184
##
## [1] "low_incorrect"
## Df Sum Sq Mean Sq
## split_indiv_probs[["all_data"]][[trial_type]][["all"]][, 16] 2 0.411 0.20540
## Residuals 107 9.296 0.08688
## F value Pr(>F)
## split_indiv_probs[["all_data"]][[trial_type]][["all"]][, 16] 2.364 0.0989 .
## Residuals
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## 57 observations deleted due to missingness
## Tukey multiple comparisons of means
## 95% family-wise confidence level
##
## Fit: aov(formula = split_indiv_probs[["all_data"]][[trial_type]][["all"]][, 6] ~ split_indiv_probs[["all_data"]][[trial_type]][["all"]][, 16])
##
## $`split_indiv_probs[["all_data"]][[trial_type]][["all"]][, 16]`
## diff lwr upr p adj
## med-high 0.09845475 -0.07433204 0.2712415 0.3686141
## low-high 0.14980403 -0.01447301 0.3140811 0.0815366
## low-med 0.05134929 -0.10732502 0.2100236 0.7226940
Interestingly, at encoding, we see that the medium and high capacity subjects have significantly higher probability of predicting a face than the low capacity subjects. It also appears that for correct trials, medium capacity subjects have higher probability earlier than either low or high capacity subjects, but I’m not sure of the best way to test that.
# Same four-panel capacity-group figure as above, but built from the template
# time courses instead of individual trials.
template_avgs <- list()
for (i in seq.int(1,4)){
template_avgs[[i]] <- ggplot(data = split_template[["avgs"]][[i]][["all"]])+
geom_line(aes(x=TR,y=mean,color=group))+
geom_ribbon(aes(x=TR,ymin=se_min,ymax=se_max,fill=group),alpha=0.2)+
geom_line(aes(x=TR,y=0.33),color="black",linetype="dotted")+
scale_x_continuous(breaks = c(1:14),labels=c(1:14))+
ggtitle(names(split_template[["avgs"]])[i])+
ylab("Probability")+
theme_classic()
}
# 2x2 grid with a shared legend.
(template_avgs[[1]] + template_avgs[[2]]) / (template_avgs[[3]] + template_avgs[[4]])+
plot_layout(guides = "collect")+
plot_annotation(title="Probability of classifier predicting a face from trial templates")
# Encoding-period (column 6 = TR 6) ANOVA by capacity group (column 16) for
# the template data, parallel to the individual-trial analysis above.
for (trial_type in seq.int(1,4)){
print(names(split_template[["all_data"]])[trial_type])
temp.aov <- aov(split_template[["all_data"]][[trial_type]][["all"]][,6] ~ split_template[["all_data"]][[trial_type]][["all"]][,16])
print(summary(temp.aov))
print(TukeyHSD(temp.aov))
}
## [1] "high_correct"
## Df Sum Sq Mean Sq
## split_template[["all_data"]][[trial_type]][["all"]][, 16] 2 0.17 0.08524
## Residuals 164 24.54 0.14964
## F value Pr(>F)
## split_template[["all_data"]][[trial_type]][["all"]][, 16] 0.57 0.567
## Residuals
## Tukey multiple comparisons of means
## 95% family-wise confidence level
##
## Fit: aov(formula = split_template[["all_data"]][[trial_type]][["all"]][, 6] ~ split_template[["all_data"]][[trial_type]][["all"]][, 16])
##
## $`split_template[["all_data"]][[trial_type]][["all"]][, 16]`
## diff lwr upr p adj
## med-high 0.07737857 -0.09553013 0.2502873 0.5412316
## low-high 0.02992305 -0.14376982 0.2036159 0.9126050
## low-med -0.04745552 -0.22114839 0.1262374 0.7947733
##
## [1] "high_incorrect"
## Df Sum Sq Mean Sq
## split_template[["all_data"]][[trial_type]][["all"]][, 16] 2 1.392 0.6959
## Residuals 164 21.578 0.1316
## F value Pr(>F)
## split_template[["all_data"]][[trial_type]][["all"]][, 16] 5.289 0.00594 **
## Residuals
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## Tukey multiple comparisons of means
## 95% family-wise confidence level
##
## Fit: aov(formula = split_template[["all_data"]][[trial_type]][["all"]][, 6] ~ split_template[["all_data"]][[trial_type]][["all"]][, 16])
##
## $`split_template[["all_data"]][[trial_type]][["all"]][, 16]`
## diff lwr upr p adj
## med-high 0.002976786 -0.1591571 0.16511071 0.9989609
## low-high -0.192747727 -0.3556170 -0.02987849 0.0157538
## low-med -0.195724513 -0.3585937 -0.03285528 0.0139126
##
## [1] "low_correct"
## Df Sum Sq Mean Sq
## split_template[["all_data"]][[trial_type]][["all"]][, 16] 2 0.151 0.07533
## Residuals 164 24.514 0.14948
## F value Pr(>F)
## split_template[["all_data"]][[trial_type]][["all"]][, 16] 0.504 0.605
## Residuals
## Tukey multiple comparisons of means
## 95% family-wise confidence level
##
## Fit: aov(formula = split_template[["all_data"]][[trial_type]][["all"]][, 6] ~ split_template[["all_data"]][[trial_type]][["all"]][, 16])
##
## $`split_template[["all_data"]][[trial_type]][["all"]][, 16]`
## diff lwr upr p adj
## med-high 0.02083036 -0.1519814 0.1936421 0.9561969
## low-high -0.05086513 -0.2244606 0.1227304 0.7678991
## low-med -0.07169549 -0.2452910 0.1019000 0.5925333
##
## [1] "low_incorrect"
## Df Sum Sq Mean Sq
## split_template[["all_data"]][[trial_type]][["all"]][, 16] 2 0 0
## Residuals 107 0 0
## F value Pr(>F)
## split_template[["all_data"]][[trial_type]][["all"]][, 16]
## Residuals
## 57 observations deleted due to missingness
## Tukey multiple comparisons of means
## 95% family-wise confidence level
##
## Fit: aov(formula = split_template[["all_data"]][[trial_type]][["all"]][, 6] ~ split_template[["all_data"]][[trial_type]][["all"]][, 16])
##
## $`split_template[["all_data"]][[trial_type]][["all"]][, 16]`
## diff lwr upr p adj
## med-high 0 0 0 NaN
## low-high 0 0 0 NaN
## low-med 0 0 0 NaN
If we look averaged over time, we see a significant positive correlation with high load accuracy in high load incorrect trials, and a significant negative correlation with BPRS total score on correct low load trials. We also see a significant negative correlation between high load accuracy and the difference between correct and incorrect trials at both low and high loads.
# Per-subject face probability averaged over the whole trial (TRs 1-14) for
# each trial type and the correct-incorrect difference measures.
indiv_avg_over_time <- data.frame(high_correct = rowMeans(individual_trial_averages_probs[["high_correct"]][,1:14]),
high_incorrect = rowMeans(individual_trial_averages_probs[["high_incorrect"]][,1:14]),
low_correct = rowMeans(individual_trial_averages_probs[["low_correct"]][,1:14]),
low_incorrect = rowMeans(individual_trial_averages_probs[["low_incorrect"]][,1:14],na.rm=TRUE),
high_load_diff_correct = rowMeans(individual_trial_averages_probs[["high_load_correct_diff"]][,1:14]),
low_load_diff_correct = rowMeans(individual_trial_averages_probs[["low_load_correct_diff"]][,1:14]))
# is.na() is also TRUE for NaN, so this coerces any NaN cells (e.g. from
# all-NA rows under na.rm=TRUE) to plain NA.
indiv_avg_over_time[is.na(indiv_avg_over_time)] <- NA
# Attach WM span and subject IDs (same row subset as the classifier data),
# then merge in clinical (BPRS) and task-accuracy measures by subject.
indiv_avg_over_time <- data.frame(indiv_avg_over_time, omnibus_span = constructs_fMRI$omnibus_span_no_DFR_MRI[c(1:9,11:170)], PTID = constructs_fMRI$PTID[c(1:9,11:170)])
indiv_avg_over_time <- merge(indiv_avg_over_time, p200_data[,c("PTID","BPRS_TOT","XDFR_MRI_ACC_L3", "XDFR_MRI_ACC_L1")],by="PTID")
# Scatterplots of each averaged probability measure against span, DFR
# high-load accuracy, and BPRS, each with a linear fit.
plot_list <- list()
for (i in seq.int(1,6)){
# Column i+1 is the i-th probability measure; columns 8:11 are the behavioral
# measures (span, BPRS, accuracy) appended above.
plot_data <- indiv_avg_over_time[,c(i+1,8:11)]
colnames(plot_data)[1] <- "prob"
plot_list[["omnibus"]][[i]] <- ggplot(data = plot_data,aes(x=prob,y=omnibus_span))+
geom_point()+
stat_smooth(method="lm")+
xlab("Probability")+
ggtitle(colnames(indiv_avg_over_time)[i+1])+
theme_classic()
plot_list[["DFR_Acc"]][[i]] <- ggplot(data = plot_data,aes(x=prob,y=XDFR_MRI_ACC_L3))+
geom_point()+
stat_smooth(method="lm")+
xlab("Probability")+
ggtitle(colnames(indiv_avg_over_time)[i+1])+
theme_classic()
plot_list[["BPRS"]][[i]] <- ggplot(data = plot_data,aes(x=prob,y=BPRS_TOT))+
geom_point()+
stat_smooth(method="lm")+
xlab("Probability")+
ggtitle(colnames(indiv_avg_over_time)[i+1])+
theme_classic()
}
(plot_list[["omnibus"]][[1]] + plot_list[["omnibus"]][[2]]) /
(plot_list[["omnibus"]][[3]] + plot_list[["omnibus"]][[4]]) +
plot_annotation(title="Correlation of face probability and Omnibus Span")
## `geom_smooth()` using formula 'y ~ x'
## `geom_smooth()` using formula 'y ~ x'
## `geom_smooth()` using formula 'y ~ x'
## `geom_smooth()` using formula 'y ~ x'
## Warning: Removed 57 rows containing non-finite values (stat_smooth).
## Warning: Removed 57 rows containing missing values (geom_point).
(plot_list[["omnibus"]][[5]] + plot_list[["omnibus"]][[6]])+
plot_annotation(title="Correlation of difference in face probability across correctness and Omnibus Span")
## `geom_smooth()` using formula 'y ~ x'
## `geom_smooth()` using formula 'y ~ x'
## Warning: Removed 57 rows containing non-finite values (stat_smooth).
## Warning: Removed 57 rows containing missing values (geom_point).
(plot_list[["DFR_Acc"]][[1]] + plot_list[["DFR_Acc"]][[2]]) /
(plot_list[["DFR_Acc"]][[3]] + plot_list[["DFR_Acc"]][[4]]) +
plot_annotation(title="Correlation of face probability and DFR High Load Accuracy")
## `geom_smooth()` using formula 'y ~ x'
## `geom_smooth()` using formula 'y ~ x'
## `geom_smooth()` using formula 'y ~ x'
## `geom_smooth()` using formula 'y ~ x'
## Warning: Removed 57 rows containing non-finite values (stat_smooth).
## Warning: Removed 57 rows containing missing values (geom_point).
(plot_list[["DFR_Acc"]][[5]] + plot_list[["DFR_Acc"]][[6]])+
plot_annotation(title="Correlation of difference in face probability across correctness and DFR High Load Accuracy")
## `geom_smooth()` using formula 'y ~ x'
## `geom_smooth()` using formula 'y ~ x'
## Warning: Removed 57 rows containing non-finite values (stat_smooth).
## Warning: Removed 57 rows containing missing values (geom_point).
(plot_list[["BPRS"]][[1]] + plot_list[["BPRS"]][[2]]) /
(plot_list[["BPRS"]][[3]] + plot_list[["BPRS"]][[4]]) +
plot_annotation(title="Correlation of face probability and BPRS")
## `geom_smooth()` using formula 'y ~ x'
## `geom_smooth()` using formula 'y ~ x'
## `geom_smooth()` using formula 'y ~ x'
## `geom_smooth()` using formula 'y ~ x'
## Warning: Removed 57 rows containing non-finite values (stat_smooth).
## Warning: Removed 57 rows containing missing values (geom_point).
(plot_list[["BPRS"]][[5]] + plot_list[["BPRS"]][[6]])+
plot_annotation(title="Correlation of difference in face probability across correctness and BPRS")
## `geom_smooth()` using formula 'y ~ x'
## `geom_smooth()` using formula 'y ~ x'
## Warning: Removed 57 rows containing non-finite values (stat_smooth).
## Warning: Removed 57 rows containing missing values (geom_point).
# Follow-up correlation tests pulled out from the plots above.
# Low-load correct-incorrect difference vs WM span (df = 110: low-incorrect
# data are missing for some subjects).
cor.test(indiv_avg_over_time$low_load_diff_correct, indiv_avg_over_time$omnibus_span)
##
## Pearson's product-moment correlation
##
## data: indiv_avg_over_time$low_load_diff_correct and indiv_avg_over_time$omnibus_span
## t = 2.6161, df = 110, p-value = 0.01014
## alternative hypothesis: true correlation is not equal to 0
## 95 percent confidence interval:
## 0.0591235 0.4092030
## sample estimates:
## cor
## 0.2420241
# High-load correct-incorrect difference vs high-load DFR accuracy.
cor.test(indiv_avg_over_time$high_load_diff_correct, indiv_avg_over_time$XDFR_MRI_ACC_L3)
##
## Pearson's product-moment correlation
##
## data: indiv_avg_over_time$high_load_diff_correct and indiv_avg_over_time$XDFR_MRI_ACC_L3
## t = -2.1149, df = 167, p-value = 0.03592
## alternative hypothesis: true correlation is not equal to 0
## 95 percent confidence interval:
## -0.3050307 -0.0108112
## sample estimates:
## cor
## -0.1615076
# Low-load correct-incorrect difference vs high-load DFR accuracy.
cor.test(indiv_avg_over_time$low_load_diff_correct, indiv_avg_over_time$XDFR_MRI_ACC_L3)
##
## Pearson's product-moment correlation
##
## data: indiv_avg_over_time$low_load_diff_correct and indiv_avg_over_time$XDFR_MRI_ACC_L3
## t = -2.174, df = 110, p-value = 0.03184
## alternative hypothesis: true correlation is not equal to 0
## 95 percent confidence interval:
## -0.3744264 -0.0180984
## sample estimates:
## cor
## -0.2029725
# High-load incorrect probability vs high-load DFR accuracy.
cor.test(indiv_avg_over_time$high_incorrect, indiv_avg_over_time$XDFR_MRI_ACC_L3)
##
## Pearson's product-moment correlation
##
## data: indiv_avg_over_time$high_incorrect and indiv_avg_over_time$XDFR_MRI_ACC_L3
## t = 2.1778, df = 167, p-value = 0.03082
## alternative hypothesis: true correlation is not equal to 0
## 95 percent confidence interval:
## 0.01561061 0.30937788
## sample estimates:
## cor
## 0.166179
# Low-load correct probability vs BPRS total score.
cor.test(indiv_avg_over_time$low_correct, indiv_avg_over_time$BPRS_TOT)
##
## Pearson's product-moment correlation
##
## data: indiv_avg_over_time$low_correct and indiv_avg_over_time$BPRS_TOT
## t = -1.9794, df = 167, p-value = 0.04942
## alternative hypothesis: true correlation is not equal to 0
## 95 percent confidence interval:
## -0.2956064110 -0.0004530147
## sample estimates:
## cor
## -0.1514027
If we look at the patterns over time, we can see that BPRS tends to be negatively related to classification during the encoding and probe periods. Correlations with accuracy are strongest during the encoding period. Span generally shows only weak correlations with classification probability.
Next step is to pull out some of these correlations and see if they’re significant.
# Correlate each behavioral measure with face-classification probability at
# every TR, separately for each of the six trial bins, and plot the
# correlation time courses.
data_to_plot <- merge(constructs_fMRI,p200_data,by="PTID")
data_to_plot <- merge(data_to_plot,p200_clinical_zscores,by="PTID")
# Hard-coded subset: drops row 10 and keeps PTID plus five behavioral
# columns.  TODO(review): verify these positional indices still match the
# merged frame's layout.
data_to_plot <- data_to_plot[c(1:9,11:170),c(1,7,14,15,41,42)]
# Load effect on accuracy (high minus low load).
# NOTE(review): ACC_LE becomes column 7, but the loop below only correlates
# columns 2-5, so ACC_LE is computed without being used here.
data_to_plot$ACC_LE <- data_to_plot$XDFR_MRI_ACC_L3 - data_to_plot$XDFR_MRI_ACC_L1
corr_to_behav_plots <- list()
for (i in seq.int(1,6)){
# Rows = the four behavioral measures (columns 2-5), cols = the 14 TRs.
measure_by_time <- data.frame(matrix(nrow=4,ncol=14))
for (measure in seq.int(2,5)){
for (TR in seq.int(1,14)){
measure_by_time[measure-1,TR] <- cor(data_to_plot[,measure],individual_trial_averages_probs[[i]][,TR],use = "pairwise.complete.obs")
}
}
# Transpose to TR-by-measure, label the columns, and melt to long format
# for ggplot.
measure_by_time <- data.frame(t(measure_by_time))
measure_by_time$TR <- seq.int(1,14)
colnames(measure_by_time)[1:4] <- colnames(data_to_plot)[2:5]
melted_measure_by_time <- melt(measure_by_time,id.vars="TR")
corr_to_behav_plots[[names(individual_trial_averages_probs)[i]]] <- ggplot(data=melted_measure_by_time,aes(x=TR,y=value))+
geom_line(aes(color=variable))+
scale_x_continuous(breaks = c(1:14),labels=c(1:14))+
ggtitle(names(individual_trial_averages_probs)[i])+
theme_classic()
}
# First the four load x accuracy bins, then the two difference scores.
(corr_to_behav_plots[[1]] + corr_to_behav_plots[[2]]) / (corr_to_behav_plots[[3]] + corr_to_behav_plots[[4]])+
plot_layout(guides="collect")+
plot_annotation(title = "Correlation between face classification and behavioral measures")
(corr_to_behav_plots[[5]] + corr_to_behav_plots[[6]])+
plot_layout(guides="collect")+
plot_annotation(title = "Correlation between difference across correctness in face classification and behavioral measures")
# Build per-trial-type scatter plots of classification probability at three
# single TRs against behavior.  V6/V8/V11 are the TR-6/TR-8/TR-11
# probability columns, stored under the keys "cue", "delay" and "probe"
# respectively -- presumably the TRs corresponding to those task periods;
# TODO(review) confirm the TR-to-period mapping.
plot_list <- list()
for (trial_type in seq.int(1,6)){
# Align probabilities with behavior by PTID for this trial type.
temp_plot_data <- merge(p200_data, individual_trial_averages_probs[[trial_type]],by="PTID")
temp_plot_data <- merge(temp_plot_data,constructs_fMRI,by="PTID")
# Omnibus span (computed without the DFR MRI task) vs probability.
plot_list[["omnibus"]][["cue"]][[trial_type]] <- ggplot(data=temp_plot_data,aes(y=V6,x=omnibus_span_no_DFR_MRI))+
geom_point()+
stat_smooth(method="lm")+
ylab("Probability")+
ggtitle(names(individual_trial_averages_probs)[trial_type])+
theme_classic()
plot_list[["omnibus"]][["delay"]][[trial_type]] <- ggplot(data=temp_plot_data,aes(y=V8,x=omnibus_span_no_DFR_MRI))+
geom_point()+
stat_smooth(method="lm")+
ylab("Probability")+
ggtitle(names(individual_trial_averages_probs)[trial_type])+
theme_classic()
plot_list[["omnibus"]][["probe"]][[trial_type]] <- ggplot(data=temp_plot_data,aes(y=V11,x=omnibus_span_no_DFR_MRI))+
geom_point()+
stat_smooth(method="lm")+
ylab("Probability")+
ggtitle(names(individual_trial_averages_probs)[trial_type])+
theme_classic()
# Acc
plot_list[["L3_Acc"]][["cue"]][[trial_type]] <- ggplot(data=temp_plot_data,aes(y=V6,x=XDFR_MRI_ACC_L3))+
geom_point()+
stat_smooth(method="lm")+
ylab("Probability")+
ggtitle(names(individual_trial_averages_probs)[trial_type])+
theme_classic()
plot_list[["L3_Acc"]][["delay"]][[trial_type]] <- ggplot(data=temp_plot_data,aes(y=V8,x=XDFR_MRI_ACC_L3))+
geom_point()+
stat_smooth(method="lm")+
ylab("Probability")+
ggtitle(names(individual_trial_averages_probs)[trial_type])+
theme_classic()
plot_list[["L3_Acc"]][["probe"]][[trial_type]] <- ggplot(data=temp_plot_data,aes(y=V11,x=XDFR_MRI_ACC_L3))+
geom_point()+
stat_smooth(method="lm")+
ylab("Probability")+
ggtitle(names(individual_trial_averages_probs)[trial_type])+
theme_classic()
# BPRS
plot_list[["BPRS"]][["cue"]][[trial_type]] <- ggplot(data=temp_plot_data,aes(y=V6,x=BPRS_TOT))+
geom_point()+
stat_smooth(method="lm")+
ylab("Probability")+
ggtitle(names(individual_trial_averages_probs)[trial_type])+
theme_classic()
plot_list[["BPRS"]][["delay"]][[trial_type]] <- ggplot(data=temp_plot_data,aes(y=V8,x=BPRS_TOT))+
geom_point()+
stat_smooth(method="lm")+
ylab("Probability")+
ggtitle(names(individual_trial_averages_probs)[trial_type])+
theme_classic()
plot_list[["BPRS"]][["probe"]][[trial_type]] <- ggplot(data=temp_plot_data,aes(y=V11,x=BPRS_TOT))+
geom_point()+
stat_smooth(method="lm")+
ylab("Probability")+
ggtitle(names(individual_trial_averages_probs)[trial_type])+
theme_classic()
}
There is a significant positive relationship between accuracy and decoding probability on incorrect high load trials. We also see significant correlations between omnibus span and the correct/incorrect difference at low load, and between high load accuracy and the correct/incorrect difference at both high and low loads.
# Cue-period (TR 6) scatters: four load x accuracy bins first, then the two
# correct/incorrect difference scores, for each behavioral measure.
(plot_list[["omnibus"]][["cue"]][[1]] + plot_list[["omnibus"]][["cue"]][[2]]) /
(plot_list[["omnibus"]][["cue"]][[3]] + plot_list[["omnibus"]][["cue"]][[4]]) +
plot_annotation(title = "Omnibus vs Classification at Cue Period")
## `geom_smooth()` using formula 'y ~ x'
## `geom_smooth()` using formula 'y ~ x'
## `geom_smooth()` using formula 'y ~ x'
## `geom_smooth()` using formula 'y ~ x'
## Warning: Removed 57 rows containing non-finite values (stat_smooth).
## Warning: Removed 57 rows containing missing values (geom_point).
(plot_list[["omnibus"]][["cue"]][[5]] + plot_list[["omnibus"]][["cue"]][[6]]) +
plot_annotation(title = "Omnibus vs Classification at Cue Period")
## `geom_smooth()` using formula 'y ~ x'
## `geom_smooth()` using formula 'y ~ x'
## Warning: Removed 57 rows containing non-finite values (stat_smooth).
## Warning: Removed 57 rows containing missing values (geom_point).
(plot_list[["L3_Acc"]][["cue"]][[1]] + plot_list[["L3_Acc"]][["cue"]][[2]]) /
(plot_list[["L3_Acc"]][["cue"]][[3]] + plot_list[["L3_Acc"]][["cue"]][[4]]) +
plot_annotation(title = "L3_Acc vs Classification at Cue Period")
## `geom_smooth()` using formula 'y ~ x'
## `geom_smooth()` using formula 'y ~ x'
## `geom_smooth()` using formula 'y ~ x'
## `geom_smooth()` using formula 'y ~ x'
## Warning: Removed 57 rows containing non-finite values (stat_smooth).
## Warning: Removed 57 rows containing missing values (geom_point).
(plot_list[["L3_Acc"]][["cue"]][[5]] + plot_list[["L3_Acc"]][["cue"]][[6]]) +
plot_annotation(title = "L3_Acc vs Classification at Cue Period")
## `geom_smooth()` using formula 'y ~ x'
## `geom_smooth()` using formula 'y ~ x'
## Warning: Removed 57 rows containing non-finite values (stat_smooth).
## Warning: Removed 57 rows containing missing values (geom_point).
(plot_list[["BPRS"]][["cue"]][[1]] + plot_list[["BPRS"]][["cue"]][[2]]) /
(plot_list[["BPRS"]][["cue"]][[3]] + plot_list[["BPRS"]][["cue"]][[4]]) +
plot_annotation(title = "BPRS vs Classification at Cue Period")
## `geom_smooth()` using formula 'y ~ x'
## `geom_smooth()` using formula 'y ~ x'
## `geom_smooth()` using formula 'y ~ x'
## `geom_smooth()` using formula 'y ~ x'
## Warning: Removed 57 rows containing non-finite values (stat_smooth).
## Warning: Removed 57 rows containing missing values (geom_point).
(plot_list[["BPRS"]][["cue"]][[5]] + plot_list[["BPRS"]][["cue"]][[6]]) +
plot_annotation(title = "BPRS vs Classification at Cue Period")
## `geom_smooth()` using formula 'y ~ x'
## `geom_smooth()` using formula 'y ~ x'
## Warning: Removed 57 rows containing non-finite values (stat_smooth).
## Warning: Removed 57 rows containing missing values (geom_point).
# Formal tests for the cue-period (V6) relationships shown above.
# NOTE(review): temp_plot_data is whatever the last loop iteration left
# behind (trial type 6); pairing its behavioral columns with probability
# columns taken directly from the named list assumes both are in the same
# PTID order -- confirm the row alignment.
cor.test(individual_trial_averages_probs[["low_load_correct_diff"]]$V6,temp_plot_data$omnibus_span_no_DFR_MRI)
##
## Pearson's product-moment correlation
##
## data: individual_trial_averages_probs[["low_load_correct_diff"]]$V6 and temp_plot_data$omnibus_span_no_DFR_MRI
## t = 2.2433, df = 110, p-value = 0.02688
## alternative hypothesis: true correlation is not equal to 0
## 95 percent confidence interval:
## 0.02455756 0.37996907
## sample estimates:
## cor
## 0.2091602
cor.test(individual_trial_averages_probs[["high_incorrect"]]$V6,temp_plot_data$XDFR_MRI_ACC_L3)
##
## Pearson's product-moment correlation
##
## data: individual_trial_averages_probs[["high_incorrect"]]$V6 and temp_plot_data$XDFR_MRI_ACC_L3
## t = 3.3254, df = 167, p-value = 0.001085
## alternative hypothesis: true correlation is not equal to 0
## 95 percent confidence interval:
## 0.1020913 0.3856617
## sample estimates:
## cor
## 0.2492106
cor.test(individual_trial_averages_probs[["high_load_correct_diff"]]$V6,temp_plot_data$XDFR_MRI_ACC_L3)
##
## Pearson's product-moment correlation
##
## data: individual_trial_averages_probs[["high_load_correct_diff"]]$V6 and temp_plot_data$XDFR_MRI_ACC_L3
## t = -2.6454, df = 167, p-value = 0.008938
## alternative hypothesis: true correlation is not equal to 0
## 95 percent confidence interval:
## -0.34118029 -0.05113764
## sample estimates:
## cor
## -0.2005495
cor.test(individual_trial_averages_probs[["low_load_correct_diff"]]$V6,temp_plot_data$XDFR_MRI_ACC_L3)
##
## Pearson's product-moment correlation
##
## data: individual_trial_averages_probs[["low_load_correct_diff"]]$V6 and temp_plot_data$XDFR_MRI_ACC_L3
## t = -2.4181, df = 110, p-value = 0.01724
## alternative hypothesis: true correlation is not equal to 0
## 95 percent confidence interval:
## -0.39380141 -0.04080756
## sample estimates:
## cor
## -0.2246622
The same pattern holds in the delay period: high load accuracy is related both to classification probability on incorrect high load trials and to the correct/incorrect difference at high load.
# Delay-period (TR 8) scatters, same layout as the cue-period figures.
(plot_list[["omnibus"]][["delay"]][[1]] + plot_list[["omnibus"]][["delay"]][[2]]) /
(plot_list[["omnibus"]][["delay"]][[3]] + plot_list[["omnibus"]][["delay"]][[4]]) +
plot_annotation(title = "Omnibus vs Classification at delay Period")
## `geom_smooth()` using formula 'y ~ x'
## `geom_smooth()` using formula 'y ~ x'
## `geom_smooth()` using formula 'y ~ x'
## `geom_smooth()` using formula 'y ~ x'
## Warning: Removed 57 rows containing non-finite values (stat_smooth).
## Warning: Removed 57 rows containing missing values (geom_point).
(plot_list[["omnibus"]][["delay"]][[5]] + plot_list[["omnibus"]][["delay"]][[6]]) +
plot_annotation(title = "Omnibus vs Classification at delay Period")
## `geom_smooth()` using formula 'y ~ x'
## `geom_smooth()` using formula 'y ~ x'
## Warning: Removed 57 rows containing non-finite values (stat_smooth).
## Warning: Removed 57 rows containing missing values (geom_point).
(plot_list[["L3_Acc"]][["delay"]][[1]] + plot_list[["L3_Acc"]][["delay"]][[2]]) /
(plot_list[["L3_Acc"]][["delay"]][[3]] + plot_list[["L3_Acc"]][["delay"]][[4]]) +
plot_annotation(title = "L3_Acc vs Classification at delay Period")
## `geom_smooth()` using formula 'y ~ x'
## `geom_smooth()` using formula 'y ~ x'
## `geom_smooth()` using formula 'y ~ x'
## `geom_smooth()` using formula 'y ~ x'
## Warning: Removed 57 rows containing non-finite values (stat_smooth).
## Warning: Removed 57 rows containing missing values (geom_point).
(plot_list[["L3_Acc"]][["delay"]][[5]] + plot_list[["L3_Acc"]][["delay"]][[6]]) +
plot_annotation(title = "L3_Acc vs Classification at delay Period")
## `geom_smooth()` using formula 'y ~ x'
## `geom_smooth()` using formula 'y ~ x'
## Warning: Removed 57 rows containing non-finite values (stat_smooth).
## Warning: Removed 57 rows containing missing values (geom_point).
(plot_list[["BPRS"]][["delay"]][[1]] + plot_list[["BPRS"]][["delay"]][[2]]) /
(plot_list[["BPRS"]][["delay"]][[3]] + plot_list[["BPRS"]][["delay"]][[4]]) +
plot_annotation(title = "BPRS vs Classification at delay Period")
## `geom_smooth()` using formula 'y ~ x'
## `geom_smooth()` using formula 'y ~ x'
## `geom_smooth()` using formula 'y ~ x'
## `geom_smooth()` using formula 'y ~ x'
## Warning: Removed 57 rows containing non-finite values (stat_smooth).
## Warning: Removed 57 rows containing missing values (geom_point).
(plot_list[["BPRS"]][["delay"]][[5]] + plot_list[["BPRS"]][["delay"]][[6]]) +
plot_annotation(title = "BPRS vs Classification at delay Period")
## `geom_smooth()` using formula 'y ~ x'
## `geom_smooth()` using formula 'y ~ x'
## Warning: Removed 57 rows containing non-finite values (stat_smooth).
## Warning: Removed 57 rows containing missing values (geom_point).
# Formal tests for the delay-period (V8) relationships.  Same row-alignment
# caveat as the cue-period tests: temp_plot_data is from the final loop
# iteration.
cor.test(individual_trial_averages_probs[["low_load_correct_diff"]]$V8,temp_plot_data$omnibus_span_no_DFR_MRI)
##
## Pearson's product-moment correlation
##
## data: individual_trial_averages_probs[["low_load_correct_diff"]]$V8 and temp_plot_data$omnibus_span_no_DFR_MRI
## t = 1.4393, df = 110, p-value = 0.1529
## alternative hypothesis: true correlation is not equal to 0
## 95 percent confidence interval:
## -0.05088581 0.31359830
## sample estimates:
## cor
## 0.1359537
cor.test(individual_trial_averages_probs[["high_incorrect"]]$V8,temp_plot_data$XDFR_MRI_ACC_L3)
##
## Pearson's product-moment correlation
##
## data: individual_trial_averages_probs[["high_incorrect"]]$V8 and temp_plot_data$XDFR_MRI_ACC_L3
## t = 2.0886, df = 167, p-value = 0.03826
## alternative hypothesis: true correlation is not equal to 0
## 95 percent confidence interval:
## 0.008803745 0.303208692
## sample estimates:
## cor
## 0.1595517
cor.test(individual_trial_averages_probs[["low_load_correct_diff"]]$V8,temp_plot_data$XDFR_MRI_ACC_L3)
##
## Pearson's product-moment correlation
##
## data: individual_trial_averages_probs[["low_load_correct_diff"]]$V8 and temp_plot_data$XDFR_MRI_ACC_L3
## t = -1.0343, df = 110, p-value = 0.3033
## alternative hypothesis: true correlation is not equal to 0
## 95 percent confidence interval:
## -0.27862121 0.08903798
## sample estimates:
## cor
## -0.09813921
cor.test(individual_trial_averages_probs[["high_load_correct_diff"]]$V8,temp_plot_data$XDFR_MRI_ACC_L3)
##
## Pearson's product-moment correlation
##
## data: individual_trial_averages_probs[["high_load_correct_diff"]]$V8 and temp_plot_data$XDFR_MRI_ACC_L3
## t = -2.3046, df = 167, p-value = 0.02242
## alternative hypothesis: true correlation is not equal to 0
## 95 percent confidence interval:
## -0.31809253 -0.02527267
## sample estimates:
## cor
## -0.1755629
At TR 11 (the probe period), classification probability on incorrect high load trials is significantly negatively correlated with BPRS, and the correct/incorrect difference at high load is significantly positively correlated with BPRS.
# Probe-period (TR 11) scatters, same layout as the cue- and delay-period
# figures.
(plot_list[["omnibus"]][["probe"]][[1]] + plot_list[["omnibus"]][["probe"]][[2]]) /
(plot_list[["omnibus"]][["probe"]][[3]] + plot_list[["omnibus"]][["probe"]][[4]]) +
plot_annotation(title = "Omnibus vs Classification at probe Period")
## `geom_smooth()` using formula 'y ~ x'
## `geom_smooth()` using formula 'y ~ x'
## `geom_smooth()` using formula 'y ~ x'
## `geom_smooth()` using formula 'y ~ x'
## Warning: Removed 57 rows containing non-finite values (stat_smooth).
## Warning: Removed 57 rows containing missing values (geom_point).
(plot_list[["omnibus"]][["probe"]][[5]] + plot_list[["omnibus"]][["probe"]][[6]]) +
plot_annotation(title = "Omnibus vs Classification at probe Period")
## `geom_smooth()` using formula 'y ~ x'
## `geom_smooth()` using formula 'y ~ x'
## Warning: Removed 57 rows containing non-finite values (stat_smooth).
## Warning: Removed 57 rows containing missing values (geom_point).
(plot_list[["L3_Acc"]][["probe"]][[1]] + plot_list[["L3_Acc"]][["probe"]][[2]]) /
(plot_list[["L3_Acc"]][["probe"]][[3]] + plot_list[["L3_Acc"]][["probe"]][[4]]) +
plot_annotation(title = "L3_Acc vs Classification at probe Period")
## `geom_smooth()` using formula 'y ~ x'
## `geom_smooth()` using formula 'y ~ x'
## `geom_smooth()` using formula 'y ~ x'
## `geom_smooth()` using formula 'y ~ x'
## Warning: Removed 57 rows containing non-finite values (stat_smooth).
## Warning: Removed 57 rows containing missing values (geom_point).
(plot_list[["L3_Acc"]][["probe"]][[5]] + plot_list[["L3_Acc"]][["probe"]][[6]]) +
plot_annotation(title = "L3_Acc vs Classification at probe Period")
## `geom_smooth()` using formula 'y ~ x'
## `geom_smooth()` using formula 'y ~ x'
## Warning: Removed 57 rows containing non-finite values (stat_smooth).
## Warning: Removed 57 rows containing missing values (geom_point).
(plot_list[["BPRS"]][["probe"]][[1]] + plot_list[["BPRS"]][["probe"]][[2]]) /
(plot_list[["BPRS"]][["probe"]][[3]] + plot_list[["BPRS"]][["probe"]][[4]]) +
plot_annotation(title = "BPRS vs Classification at probe Period")
## `geom_smooth()` using formula 'y ~ x'
## `geom_smooth()` using formula 'y ~ x'
## `geom_smooth()` using formula 'y ~ x'
## `geom_smooth()` using formula 'y ~ x'
## Warning: Removed 57 rows containing non-finite values (stat_smooth).
## Warning: Removed 57 rows containing missing values (geom_point).
(plot_list[["BPRS"]][["probe"]][[5]] + plot_list[["BPRS"]][["probe"]][[6]]) +
plot_annotation(title = "BPRS vs Classification at probe Period")
## `geom_smooth()` using formula 'y ~ x'
## `geom_smooth()` using formula 'y ~ x'
## Warning: Removed 57 rows containing non-finite values (stat_smooth).
## Warning: Removed 57 rows containing missing values (geom_point).
# Formal tests for the probe-period (V11) relationships.  Same row-alignment
# caveat as above: temp_plot_data is from the final loop iteration.
cor.test(individual_trial_averages_probs[["low_load_correct_diff"]]$V11,temp_plot_data$omnibus_span_no_DFR_MRI)
##
## Pearson's product-moment correlation
##
## data: individual_trial_averages_probs[["low_load_correct_diff"]]$V11 and temp_plot_data$omnibus_span_no_DFR_MRI
## t = 1.462, df = 110, p-value = 0.1466
## alternative hypothesis: true correlation is not equal to 0
## 95 percent confidence interval:
## -0.04874608 0.31553110
## sample estimates:
## cor
## 0.1380585
cor.test(individual_trial_averages_probs[["high_incorrect"]]$V11,temp_plot_data$BPRS_TOT)
##
## Pearson's product-moment correlation
##
## data: individual_trial_averages_probs[["high_incorrect"]]$V11 and temp_plot_data$BPRS_TOT
## t = -2.8955, df = 167, p-value = 0.004293
## alternative hypothesis: true correlation is not equal to 0
## 95 percent confidence interval:
## -0.3577874 -0.0699862
## sample estimates:
## cor
## -0.2186363
cor.test(individual_trial_averages_probs[["low_correct"]]$V11,temp_plot_data$BPRS_TOT)
##
## Pearson's product-moment correlation
##
## data: individual_trial_averages_probs[["low_correct"]]$V11 and temp_plot_data$BPRS_TOT
## t = -1.9519, df = 167, p-value = 0.05262
## alternative hypothesis: true correlation is not equal to 0
## 95 percent confidence interval:
## -0.293689093 0.001646585
## sample estimates:
## cor
## -0.1493506
cor.test(individual_trial_averages_probs[["high_load_correct_diff"]]$V11,temp_plot_data$BPRS_TOT)
##
## Pearson's product-moment correlation
##
## data: individual_trial_averages_probs[["high_load_correct_diff"]]$V11 and temp_plot_data$BPRS_TOT
## t = 2.8801, df = 167, p-value = 0.004496
## alternative hypothesis: true correlation is not equal to 0
## 95 percent confidence interval:
## 0.06883426 0.35677768
## sample estimates:
## cor
## 0.2175338
# For each trial type, correlate the per-TR face-classification probability
# with three behavioral measures (omnibus span, high-load DFR accuracy, BPRS
# total), separately within each WM-capacity group.  Group labels come from
# the names of the split data (first three list entries).
behav_classification_corr_list <- list()
for (trial_type in seq.int(1, 6)) {
  # One 3 (group) x 14 (TR) correlation matrix per behavioral measure.
  group_corrs_omnibus <- data.frame(matrix(nrow = 3, ncol = 14))
  rownames(group_corrs_omnibus) <- names(split_indiv_probs[["all_data"]][[trial_type]])[1:3]
  colnames(group_corrs_omnibus) <- seq.int(1, 14)
  group_corrs_acc <- data.frame(matrix(nrow = 3, ncol = 14))
  rownames(group_corrs_acc) <- names(split_indiv_probs[["all_data"]][[trial_type]])[1:3]
  colnames(group_corrs_acc) <- seq.int(1, 14)
  group_corrs_BPRS <- data.frame(matrix(nrow = 3, ncol = 14))
  rownames(group_corrs_BPRS) <- names(split_indiv_probs[["all_data"]][[trial_type]])[1:3]
  colnames(group_corrs_BPRS) <- seq.int(1, 14)
  for (level in seq.int(1, 3)) {
    # Order the probability rows by PTID and restrict the behavioral table to
    # the same subjects so the two frames line up row-for-row in cor().
    temp_subj <- split_indiv_probs[["all_data"]][[trial_type]][[level]][order(split_indiv_probs[["all_data"]][[trial_type]][[level]]$PTID), ]
    temp_data <- data_to_plot[data_to_plot$PTID %in% split_indiv_probs[["all_data"]][[trial_type]][[level]]$PTID, ]
    for (TR in seq.int(1, 14)) {
      group_corrs_omnibus[level, TR] <- cor(temp_subj[, TR], temp_data$omnibus_span_no_DFR_MRI, use = "pairwise.complete.obs")
      group_corrs_acc[level, TR] <- cor(temp_subj[, TR], temp_data$XDFR_MRI_ACC_L3, use = "pairwise.complete.obs")
      # BPRS_TOT.x: the ".x" suffix comes from merging two frames that both
      # carry BPRS_TOT -- TODO(review) confirm .x is the intended copy.
      group_corrs_BPRS[level, TR] <- cor(temp_subj[, TR], temp_data$BPRS_TOT.x, use = "pairwise.complete.obs")
    }
  }
  # Attach the group label once, after the level loop (it was previously
  # re-assigned redundantly on every pass), and take each frame's label from
  # its own rownames.  All three frames share identical rownames, so the
  # resulting values are unchanged.
  group_corrs_acc$level <- factor(rownames(group_corrs_acc))
  group_corrs_BPRS$level <- factor(rownames(group_corrs_BPRS))
  group_corrs_omnibus$level <- factor(rownames(group_corrs_omnibus))
  behav_classification_corr_list[["omnibus"]][[names(split_indiv_probs[["all_data"]])[trial_type]]] <- group_corrs_omnibus
  behav_classification_corr_list[["BPRS"]][[names(split_indiv_probs[["all_data"]])[trial_type]]] <- group_corrs_BPRS
  behav_classification_corr_list[["L3_Acc"]][[names(split_indiv_probs[["all_data"]])[trial_type]]] <- group_corrs_acc
}
# Melt each group-level correlation matrix to long format and draw one
# correlation-by-TR line per WM-capacity group.
behav_classification_corr_melt <- list()
behav_split_plot_list <- list()
for (measure in seq.int(1,3)){
for (trial_type in seq.int(1,6)){
behav_classification_corr_melt[[names(behav_classification_corr_list)[measure]]][[names(behav_classification_corr_list[[measure]])[trial_type]]] <- melt(behav_classification_corr_list[[measure]][[trial_type]],id.vars="level")
# melt() leaves the TR column names as a factor; convert back to numeric so
# the x axis is continuous.
behav_classification_corr_melt[[measure]][[trial_type]]$variable <- as.numeric(as.character(behav_classification_corr_melt[[measure]][[trial_type]]$variable))
# Fix the legend order to high/med/low.
behav_classification_corr_melt[[measure]][[trial_type]]$level <- factor(behav_classification_corr_melt[[measure]][[trial_type]]$level, levels=c("high","med","low"))
behav_split_plot_list[[names(behav_classification_corr_melt)[measure]]][[names(behav_classification_corr_melt[[measure]])[trial_type]]] <-
ggplot(data = behav_classification_corr_melt[[measure]][[trial_type]],aes(x=variable,y=value))+
geom_line(aes(color=level))+
scale_x_continuous(breaks = c(1:14),labels=c(1:14))+
ggtitle(names(behav_classification_corr_list[[measure]])[trial_type])+
xlab("TR")+
ylab("Correlation")+
theme_classic()
}
}
# Display: four load x accuracy bins, then the two difference scores, for
# each of the three behavioral measures.
(behav_split_plot_list[["omnibus"]][[1]] + behav_split_plot_list[["omnibus"]][[2]]) /
(behav_split_plot_list[["omnibus"]][[3]] + behav_split_plot_list[["omnibus"]][[4]])+
plot_annotation("Omnibus Span Correlation with Face Classification Probability by Group")+
plot_layout(guides="collect")
(behav_split_plot_list[["omnibus"]][[5]] + behav_split_plot_list[["omnibus"]][[6]]) +
plot_annotation("Omnibus Span Correlation with Face Classification Probability by Group")+
plot_layout(guides="collect")
(behav_split_plot_list[["L3_Acc"]][[1]] + behav_split_plot_list[["L3_Acc"]][[2]]) /
(behav_split_plot_list[["L3_Acc"]][[3]] + behav_split_plot_list[["L3_Acc"]][[4]])+
plot_annotation("High Load Acc Correlation with Face Classification Probability by Group")+
plot_layout(guides="collect")
(behav_split_plot_list[["L3_Acc"]][[5]] + behav_split_plot_list[["L3_Acc"]][[6]]) +
plot_annotation("High Load Acc Correlation with Face Classification Probability by Group")+
plot_layout(guides="collect")
(behav_split_plot_list[["BPRS"]][[1]] + behav_split_plot_list[["BPRS"]][[2]]) /
(behav_split_plot_list[["BPRS"]][[3]] + behav_split_plot_list[["BPRS"]][[4]])+
plot_annotation("BPRS Total Correlation with Face Classification Probability by Group")+
plot_layout(guides="collect")
(behav_split_plot_list[["BPRS"]][[5]] + behav_split_plot_list[["BPRS"]][[6]]) +
plot_annotation("BPRS Total Correlation with Face Classification Probability by Group")+
plot_layout(guides="collect")
If we average over time, there are no significant relationships between template classification and behavior, though the relationship with omnibus span does trend towards significance.
# Average the per-TR template classification probabilities over the whole
# trial (TRs 1-14) to get one value per subject and condition.
# na.rm = TRUE only for low_incorrect -- presumably some subjects have no
# low-load incorrect trials, leaving NAs in that condition; TODO(review)
# confirm the other conditions are complete.
template_avg_over_time <- data.frame(high_correct = rowMeans(averages_from_template[["high_correct"]][,1:14]),
high_incorrect = rowMeans(averages_from_template[["high_incorrect"]][,1:14]),
low_correct = rowMeans(averages_from_template[["low_correct"]][,1:14]),
low_incorrect = rowMeans(averages_from_template[["low_incorrect"]][,1:14],na.rm=TRUE),
high_load_diff_correct = rowMeans(averages_from_template[["high_load_correct_diff"]][,1:14]),
low_load_diff_correct = rowMeans(averages_from_template[["low_load_correct_diff"]][,1:14]))
# (A statement here previously replaced NA values with NA -- a no-op -- and
# has been removed.)
# Attach behavior: omnibus span for the same subject subset used throughout
# (row 10 dropped), then merge in BPRS and the DFR accuracies by PTID.
template_avg_over_time <- data.frame(template_avg_over_time, omnibus_span = constructs_fMRI$omnibus_span_no_DFR_MRI[c(1:9,11:170)], PTID = constructs_fMRI$PTID[c(1:9,11:170)])
template_avg_over_time <- merge(template_avg_over_time, p200_data[,c("PTID","BPRS_TOT","XDFR_MRI_ACC_L3", "XDFR_MRI_ACC_L1")],by="PTID")
# Scatter plots of the time-averaged template probability vs behavior.
# NOTE(review): this overwrites the plot_list built earlier for the
# individual-trial analysis.
plot_list <- list()
for (i in seq.int(1,6)){
# After the merge, column 1 of template_avg_over_time is PTID, columns 2-7
# the six condition averages, and columns 8-11 the behavioral measures;
# pick condition i plus the behavior columns.
plot_data <- template_avg_over_time[,c(i+1,8:11)]
colnames(plot_data)[1] <- "prob"
plot_list[["omnibus"]][[i]] <- ggplot(data = plot_data,aes(x=prob,y=omnibus_span))+
geom_point()+
stat_smooth(method="lm")+
xlab("Probability")+
ggtitle(colnames(template_avg_over_time)[i+1])+
theme_classic()
plot_list[["DFR_Acc"]][[i]] <- ggplot(data = plot_data,aes(x=prob,y=XDFR_MRI_ACC_L3))+
geom_point()+
stat_smooth(method="lm")+
xlab("Probability")+
ggtitle(colnames(template_avg_over_time)[i+1])+
theme_classic()
plot_list[["BPRS"]][[i]] <- ggplot(data = plot_data,aes(x=prob,y=BPRS_TOT))+
geom_point()+
stat_smooth(method="lm")+
xlab("Probability")+
ggtitle(colnames(template_avg_over_time)[i+1])+
theme_classic()
}
# Time-averaged template probability vs behavior, per measure.
(plot_list[["omnibus"]][[1]] + plot_list[["omnibus"]][[2]]) /
(plot_list[["omnibus"]][[3]] + plot_list[["omnibus"]][[4]]) +
plot_annotation(title="Correlation of face probability and Omnibus Span")
## `geom_smooth()` using formula 'y ~ x'
## `geom_smooth()` using formula 'y ~ x'
## `geom_smooth()` using formula 'y ~ x'
## `geom_smooth()` using formula 'y ~ x'
## Warning: Removed 57 rows containing non-finite values (stat_smooth).
## Warning: Removed 57 rows containing missing values (geom_point).
(plot_list[["omnibus"]][[5]] + plot_list[["omnibus"]][[6]])+
plot_annotation(title="Correlation of difference in face probability across correctness and Omnibus Span")
## `geom_smooth()` using formula 'y ~ x'
## `geom_smooth()` using formula 'y ~ x'
## Warning: Removed 57 rows containing non-finite values (stat_smooth).
## Warning: Removed 57 rows containing missing values (geom_point).
(plot_list[["DFR_Acc"]][[1]] + plot_list[["DFR_Acc"]][[2]]) /
(plot_list[["DFR_Acc"]][[3]] + plot_list[["DFR_Acc"]][[4]]) +
plot_annotation(title="Correlation of face probability and DFR High Load Accuracy")
## `geom_smooth()` using formula 'y ~ x'
## `geom_smooth()` using formula 'y ~ x'
## `geom_smooth()` using formula 'y ~ x'
## `geom_smooth()` using formula 'y ~ x'
## Warning: Removed 57 rows containing non-finite values (stat_smooth).
## Warning: Removed 57 rows containing missing values (geom_point).
(plot_list[["DFR_Acc"]][[5]] + plot_list[["DFR_Acc"]][[6]])+
plot_annotation(title="Correlation of difference in face probability across correctness and DFR High Load Accuracy")
## `geom_smooth()` using formula 'y ~ x'
## `geom_smooth()` using formula 'y ~ x'
## Warning: Removed 57 rows containing non-finite values (stat_smooth).
## Warning: Removed 57 rows containing missing values (geom_point).
(plot_list[["BPRS"]][[1]] + plot_list[["BPRS"]][[2]]) /
(plot_list[["BPRS"]][[3]] + plot_list[["BPRS"]][[4]]) +
plot_annotation(title="Correlation of face probability and BPRS")
## `geom_smooth()` using formula 'y ~ x'
## `geom_smooth()` using formula 'y ~ x'
## `geom_smooth()` using formula 'y ~ x'
## `geom_smooth()` using formula 'y ~ x'
## Warning: Removed 57 rows containing non-finite values (stat_smooth).
## Warning: Removed 57 rows containing missing values (geom_point).
(plot_list[["BPRS"]][[5]] + plot_list[["BPRS"]][[6]])+
plot_annotation(title="Correlation of difference in face probability across correctness and BPRS")
## `geom_smooth()` using formula 'y ~ x'
## `geom_smooth()` using formula 'y ~ x'
## Warning: Removed 57 rows containing non-finite values (stat_smooth).
## Warning: Removed 57 rows containing missing values (geom_point).
# The strongest template relationship: incorrect high-load classification vs
# omnibus span -- a trend only (p = .056).
cor.test(template_avg_over_time$high_incorrect, template_avg_over_time$omnibus_span)
##
## Pearson's product-moment correlation
##
## data: template_avg_over_time$high_incorrect and template_avg_over_time$omnibus_span
## t = 1.9219, df = 167, p-value = 0.05632
## alternative hypothesis: true correlation is not equal to 0
## 95 percent confidence interval:
## -0.003943647 0.291588729
## sample estimates:
## cor
## 0.147104
# Repeat the correlation-by-TR analysis, now using the template-based
# probabilities; the data preparation duplicates the individual-trial
# version earlier in the file.
data_to_plot <- merge(constructs_fMRI,p200_data,by="PTID")
data_to_plot <- merge(data_to_plot,p200_clinical_zscores,by="PTID")
# Hard-coded subset: drops row 10 and keeps PTID plus five behavioral
# columns.  TODO(review): verify the positional indices.
data_to_plot <- data_to_plot[c(1:9,11:170),c(1,7,14,15,41,42)]
# Load effect on accuracy (high minus low load); not correlated below
# (the loop only uses columns 2-5).
data_to_plot$ACC_LE <- data_to_plot$XDFR_MRI_ACC_L3 - data_to_plot$XDFR_MRI_ACC_L1
corr_to_behav_plots <- list()
for (i in seq.int(1,6)){
# Rows = the four behavioral measures (columns 2-5), cols = the 14 TRs.
measure_by_time <- data.frame(matrix(nrow=4,ncol=14))
for (measure in seq.int(2,5)){
for (TR in seq.int(1,14)){
measure_by_time[measure-1,TR] <- cor(data_to_plot[,measure],averages_from_template[[i]][,TR],use = "pairwise.complete.obs")
}
}
measure_by_time <- data.frame(t(measure_by_time))
measure_by_time$TR <- seq.int(1,14)
colnames(measure_by_time)[1:4] <- colnames(data_to_plot)[2:5]
melted_measure_by_time <- melt(measure_by_time,id.vars="TR")
corr_to_behav_plots[[names(averages_from_template)[i]]] <- ggplot(data=melted_measure_by_time,aes(x=TR,y=value))+
geom_line(aes(color=variable))+
scale_x_continuous(breaks = c(1:14),labels=c(1:14))+
ggtitle(names(averages_from_template)[i])+
theme_classic()
}
(corr_to_behav_plots[[1]] + corr_to_behav_plots[[2]]) / (corr_to_behav_plots[[3]] + corr_to_behav_plots[[4]])+
plot_layout(guides="collect")+
plot_annotation(title = "Correlation between face classification and behavioral measures")
## Warning: Removed 56 row(s) containing missing values (geom_path).
(corr_to_behav_plots[[5]] + corr_to_behav_plots[[6]])+
plot_layout(guides="collect")+
plot_annotation(title = "Correlation between face classification and behavioral measures")
# Per-trial-type scatter plots of template classification probability at
# single TRs (V6/V8/V11, keyed as "cue"/"delay"/"probe") against behavior;
# mirrors the individual-trial version earlier in the file.
plot_list <- list()
for(trial_type in seq.int(1,6)){
# Align probabilities with behavior by PTID for this trial type.
temp_plot_data <- merge(p200_data, averages_from_template[[trial_type]],by="PTID")
temp_plot_data <- merge(temp_plot_data,constructs_fMRI,by="PTID")
# Omnibus span (computed without the DFR MRI task) vs probability.
plot_list[["omnibus"]][["cue"]][[trial_type]] <- ggplot(data=temp_plot_data,aes(y=V6,x=omnibus_span_no_DFR_MRI))+
geom_point()+
stat_smooth(method="lm")+
ylab("Probability")+
ggtitle(names(averages_from_template)[trial_type])+
theme_classic()
plot_list[["omnibus"]][["delay"]][[trial_type]] <- ggplot(data=temp_plot_data,aes(y=V8,x=omnibus_span_no_DFR_MRI))+
geom_point()+
stat_smooth(method="lm")+
ylab("Probability")+
ggtitle(names(averages_from_template)[trial_type])+
theme_classic()
plot_list[["omnibus"]][["probe"]][[trial_type]] <- ggplot(data=temp_plot_data,aes(y=V11,x=omnibus_span_no_DFR_MRI))+
geom_point()+
stat_smooth(method="lm")+
ylab("Probability")+
ggtitle(names(averages_from_template)[trial_type])+
theme_classic()
# Acc
plot_list[["L3_Acc"]][["cue"]][[trial_type]] <- ggplot(data=temp_plot_data,aes(y=V6,x=XDFR_MRI_ACC_L3))+
geom_point()+
stat_smooth(method="lm")+
ylab("Probability")+
ggtitle(names(averages_from_template)[trial_type])+
theme_classic()
plot_list[["L3_Acc"]][["delay"]][[trial_type]] <- ggplot(data=temp_plot_data,aes(y=V8,x=XDFR_MRI_ACC_L3))+
geom_point()+
stat_smooth(method="lm")+
ylab("Probability")+
ggtitle(names(averages_from_template)[trial_type])+
theme_classic()
plot_list[["L3_Acc"]][["probe"]][[trial_type]] <- ggplot(data=temp_plot_data,aes(y=V11,x=XDFR_MRI_ACC_L3))+
geom_point()+
stat_smooth(method="lm")+
ylab("Probability")+
ggtitle(names(averages_from_template)[trial_type])+
theme_classic()
# BPRS
plot_list[["BPRS"]][["cue"]][[trial_type]] <- ggplot(data=temp_plot_data,aes(y=V6,x=BPRS_TOT))+
geom_point()+
stat_smooth(method="lm")+
ylab("Probability")+
ggtitle(names(averages_from_template)[trial_type])+
theme_classic()
plot_list[["BPRS"]][["delay"]][[trial_type]] <- ggplot(data=temp_plot_data,aes(y=V8,x=BPRS_TOT))+
geom_point()+
stat_smooth(method="lm")+
ylab("Probability")+
ggtitle(names(averages_from_template)[trial_type])+
theme_classic()
plot_list[["BPRS"]][["probe"]][[trial_type]] <- ggplot(data=temp_plot_data,aes(y=V11,x=BPRS_TOT))+
geom_point()+
stat_smooth(method="lm")+
ylab("Probability")+
ggtitle(names(averages_from_template)[trial_type])+
theme_classic()
}
We see positive relationships of omnibus span and accuracy with classification at incorrect high-load trials, and a negative relationship with BPRS total score in correct high-load trials. We also see a correlation of omnibus span and accuracy with the difference in probability of classifying a face between correct and incorrect trials at high load.
# Cue period (V6): composite figures (patchwork) of behavior vs
# face-classification probability across the six trial types.
(plot_list[["omnibus"]][["cue"]][[1]] + plot_list[["omnibus"]][["cue"]][[2]]) /
(plot_list[["omnibus"]][["cue"]][[3]] + plot_list[["omnibus"]][["cue"]][[4]]) +
plot_annotation(title = "Omnibus vs Classification at Cue Period")
## `geom_smooth()` using formula 'y ~ x'
## `geom_smooth()` using formula 'y ~ x'
## `geom_smooth()` using formula 'y ~ x'
## `geom_smooth()` using formula 'y ~ x'
## Warning: Removed 57 rows containing non-finite values (stat_smooth).
## Warning: Removed 57 rows containing missing values (geom_point).
# Remaining two trial types for omnibus span.
(plot_list[["omnibus"]][["cue"]][[5]] + plot_list[["omnibus"]][["cue"]][[6]]) +
plot_annotation(title = "Omnibus vs Classification at Cue Period")
## `geom_smooth()` using formula 'y ~ x'
## `geom_smooth()` using formula 'y ~ x'
## Warning: Removed 57 rows containing non-finite values (stat_smooth).
## Warning: Removed 57 rows containing missing values (geom_point).
# High-load (L3) accuracy panels.
(plot_list[["L3_Acc"]][["cue"]][[1]] + plot_list[["L3_Acc"]][["cue"]][[2]]) /
(plot_list[["L3_Acc"]][["cue"]][[3]] + plot_list[["L3_Acc"]][["cue"]][[4]]) +
plot_annotation(title = "L3_Acc vs Classification at Cue Period")
## `geom_smooth()` using formula 'y ~ x'
## `geom_smooth()` using formula 'y ~ x'
## `geom_smooth()` using formula 'y ~ x'
## `geom_smooth()` using formula 'y ~ x'
## Warning: Removed 57 rows containing non-finite values (stat_smooth).
## Warning: Removed 57 rows containing missing values (geom_point).
(plot_list[["L3_Acc"]][["cue"]][[5]] + plot_list[["L3_Acc"]][["cue"]][[6]]) +
plot_annotation(title = "L3_Acc vs Classification at Cue Period")
## `geom_smooth()` using formula 'y ~ x'
## `geom_smooth()` using formula 'y ~ x'
## Warning: Removed 57 rows containing non-finite values (stat_smooth).
## Warning: Removed 57 rows containing missing values (geom_point).
# BPRS total symptom score panels.
(plot_list[["BPRS"]][["cue"]][[1]] + plot_list[["BPRS"]][["cue"]][[2]]) /
(plot_list[["BPRS"]][["cue"]][[3]] + plot_list[["BPRS"]][["cue"]][[4]]) +
plot_annotation(title = "BPRS vs Classification at Cue Period")
## `geom_smooth()` using formula 'y ~ x'
## `geom_smooth()` using formula 'y ~ x'
## `geom_smooth()` using formula 'y ~ x'
## `geom_smooth()` using formula 'y ~ x'
## Warning: Removed 57 rows containing non-finite values (stat_smooth).
## Warning: Removed 57 rows containing missing values (geom_point).
(plot_list[["BPRS"]][["cue"]][[5]] + plot_list[["BPRS"]][["cue"]][[6]]) +
plot_annotation(title = "BPRS vs Classification at Cue Period")
## `geom_smooth()` using formula 'y ~ x'
## `geom_smooth()` using formula 'y ~ x'
## Warning: Removed 57 rows containing non-finite values (stat_smooth).
## Warning: Removed 57 rows containing missing values (geom_point).
# Follow-up correlation tests for the cue period (V6 column).
# Cue classification on high-load incorrect trials vs omnibus span.
cor.test(averages_from_template[["high_incorrect"]]$V6,temp_plot_data$omnibus_span_no_DFR_MRI)
##
## Pearson's product-moment correlation
##
## data: averages_from_template[["high_incorrect"]]$V6 and temp_plot_data$omnibus_span_no_DFR_MRI
## t = 2.0069, df = 167, p-value = 0.04637
## alternative hypothesis: true correlation is not equal to 0
## 95 percent confidence interval:
## 0.002558343 0.297526576
## sample estimates:
## cor
## 0.1534591
# Correct-minus-incorrect difference at high load vs omnibus span.
cor.test(averages_from_template[["high_load_correct_diff"]]$V6,temp_plot_data$omnibus_span_no_DFR_MRI)
##
## Pearson's product-moment correlation
##
## data: averages_from_template[["high_load_correct_diff"]]$V6 and temp_plot_data$omnibus_span_no_DFR_MRI
## t = -2.2482, df = 167, p-value = 0.02587
## alternative hypothesis: true correlation is not equal to 0
## 95 percent confidence interval:
## -0.31422824 -0.02098152
## sample estimates:
## cor
## -0.1713987
# High-load incorrect trials vs high-load (L3) accuracy.
cor.test(averages_from_template[["high_incorrect"]]$V6,temp_plot_data$XDFR_MRI_ACC_L3)
##
## Pearson's product-moment correlation
##
## data: averages_from_template[["high_incorrect"]]$V6 and temp_plot_data$XDFR_MRI_ACC_L3
## t = 2.7151, df = 167, p-value = 0.007322
## alternative hypothesis: true correlation is not equal to 0
## 95 percent confidence interval:
## 0.05640331 0.34583808
## sample estimates:
## cor
## 0.2056126
# Correct-minus-incorrect difference at high load vs L3 accuracy.
cor.test(averages_from_template[["high_load_correct_diff"]]$V6,temp_plot_data$XDFR_MRI_ACC_L3)
##
## Pearson's product-moment correlation
##
## data: averages_from_template[["high_load_correct_diff"]]$V6 and temp_plot_data$XDFR_MRI_ACC_L3
## t = -2.1863, df = 167, p-value = 0.03018
## alternative hypothesis: true correlation is not equal to 0
## 95 percent confidence interval:
## -0.30996626 -0.01626123
## sample estimates:
## cor
## -0.1668118
# High-load correct trials vs BPRS total score.
cor.test(averages_from_template[["high_correct"]]$V6,temp_plot_data$BPRS_TOT)
##
## Pearson's product-moment correlation
##
## data: averages_from_template[["high_correct"]]$V6 and temp_plot_data$BPRS_TOT
## t = -2.001, df = 167, p-value = 0.04701
## alternative hypothesis: true correlation is not equal to 0
## 95 percent confidence interval:
## -0.29711809 -0.00211025
## sample estimates:
## cor
## -0.1530215
There are no significant relationships with behavior at the delay period.
# Delay period (V8): same composite-figure layout as the cue-period section.
(plot_list[["omnibus"]][["delay"]][[1]] + plot_list[["omnibus"]][["delay"]][[2]]) /
(plot_list[["omnibus"]][["delay"]][[3]] + plot_list[["omnibus"]][["delay"]][[4]]) +
plot_annotation(title = "Omnibus vs Classification at delay Period")
## `geom_smooth()` using formula 'y ~ x'
## `geom_smooth()` using formula 'y ~ x'
## `geom_smooth()` using formula 'y ~ x'
## `geom_smooth()` using formula 'y ~ x'
## Warning: Removed 57 rows containing non-finite values (stat_smooth).
## Warning: Removed 57 rows containing missing values (geom_point).
(plot_list[["omnibus"]][["delay"]][[5]] + plot_list[["omnibus"]][["delay"]][[6]]) +
plot_annotation(title = "Omnibus vs Classification at delay Period")
## `geom_smooth()` using formula 'y ~ x'
## `geom_smooth()` using formula 'y ~ x'
## Warning: Removed 57 rows containing non-finite values (stat_smooth).
## Warning: Removed 57 rows containing missing values (geom_point).
# High-load (L3) accuracy panels.
(plot_list[["L3_Acc"]][["delay"]][[1]] + plot_list[["L3_Acc"]][["delay"]][[2]]) /
(plot_list[["L3_Acc"]][["delay"]][[3]] + plot_list[["L3_Acc"]][["delay"]][[4]]) +
plot_annotation(title = "L3_Acc vs Classification at delay Period")
## `geom_smooth()` using formula 'y ~ x'
## `geom_smooth()` using formula 'y ~ x'
## `geom_smooth()` using formula 'y ~ x'
## `geom_smooth()` using formula 'y ~ x'
## Warning: Removed 57 rows containing non-finite values (stat_smooth).
## Warning: Removed 57 rows containing missing values (geom_point).
(plot_list[["L3_Acc"]][["delay"]][[5]] + plot_list[["L3_Acc"]][["delay"]][[6]]) +
plot_annotation(title = "L3_Acc vs Classification at delay Period")
## `geom_smooth()` using formula 'y ~ x'
## `geom_smooth()` using formula 'y ~ x'
## Warning: Removed 57 rows containing non-finite values (stat_smooth).
## Warning: Removed 57 rows containing missing values (geom_point).
# BPRS total symptom score panels.
(plot_list[["BPRS"]][["delay"]][[1]] + plot_list[["BPRS"]][["delay"]][[2]]) /
(plot_list[["BPRS"]][["delay"]][[3]] + plot_list[["BPRS"]][["delay"]][[4]]) +
plot_annotation(title = "BPRS vs Classification at delay Period")
## `geom_smooth()` using formula 'y ~ x'
## `geom_smooth()` using formula 'y ~ x'
## `geom_smooth()` using formula 'y ~ x'
## `geom_smooth()` using formula 'y ~ x'
## Warning: Removed 57 rows containing non-finite values (stat_smooth).
## Warning: Removed 57 rows containing missing values (geom_point).
(plot_list[["BPRS"]][["delay"]][[5]] + plot_list[["BPRS"]][["delay"]][[6]]) +
plot_annotation(title = "BPRS vs Classification at delay Period")
## `geom_smooth()` using formula 'y ~ x'
## `geom_smooth()` using formula 'y ~ x'
## Warning: Removed 57 rows containing non-finite values (stat_smooth).
## Warning: Removed 57 rows containing missing values (geom_point).
There is a significant negative relationship between BPRS and classification in the high incorrect trials, which is still significant even if you remove the outlier. Similarly, the correlation between the difference in probability of correct and incorrect trials at high load and BPRS is significant, even with the outlier removed.
# Probe period (V11): same composite-figure layout as the earlier sections.
(plot_list[["omnibus"]][["probe"]][[1]] + plot_list[["omnibus"]][["probe"]][[2]]) /
(plot_list[["omnibus"]][["probe"]][[3]] + plot_list[["omnibus"]][["probe"]][[4]]) +
plot_annotation(title = "Omnibus vs Classification at probe Period")
## `geom_smooth()` using formula 'y ~ x'
## `geom_smooth()` using formula 'y ~ x'
## `geom_smooth()` using formula 'y ~ x'
## `geom_smooth()` using formula 'y ~ x'
## Warning: Removed 57 rows containing non-finite values (stat_smooth).
## Warning: Removed 57 rows containing missing values (geom_point).
(plot_list[["omnibus"]][["probe"]][[5]] + plot_list[["omnibus"]][["probe"]][[6]]) +
plot_annotation(title = "Omnibus vs Classification at probe Period")
## `geom_smooth()` using formula 'y ~ x'
## `geom_smooth()` using formula 'y ~ x'
## Warning: Removed 57 rows containing non-finite values (stat_smooth).
## Warning: Removed 57 rows containing missing values (geom_point).
# High-load (L3) accuracy panels.
(plot_list[["L3_Acc"]][["probe"]][[1]] + plot_list[["L3_Acc"]][["probe"]][[2]]) /
(plot_list[["L3_Acc"]][["probe"]][[3]] + plot_list[["L3_Acc"]][["probe"]][[4]]) +
plot_annotation(title = "L3_Acc vs Classification at probe Period")
## `geom_smooth()` using formula 'y ~ x'
## `geom_smooth()` using formula 'y ~ x'
## `geom_smooth()` using formula 'y ~ x'
## `geom_smooth()` using formula 'y ~ x'
## Warning: Removed 57 rows containing non-finite values (stat_smooth).
## Warning: Removed 57 rows containing missing values (geom_point).
(plot_list[["L3_Acc"]][["probe"]][[5]] + plot_list[["L3_Acc"]][["probe"]][[6]]) +
plot_annotation(title = "L3_Acc vs Classification at probe Period")
## `geom_smooth()` using formula 'y ~ x'
## `geom_smooth()` using formula 'y ~ x'
## Warning: Removed 57 rows containing non-finite values (stat_smooth).
## Warning: Removed 57 rows containing missing values (geom_point).
# BPRS total symptom score panels.
(plot_list[["BPRS"]][["probe"]][[1]] + plot_list[["BPRS"]][["probe"]][[2]]) /
(plot_list[["BPRS"]][["probe"]][[3]] + plot_list[["BPRS"]][["probe"]][[4]]) +
plot_annotation(title = "BPRS vs Classification at probe Period")
## `geom_smooth()` using formula 'y ~ x'
## `geom_smooth()` using formula 'y ~ x'
## `geom_smooth()` using formula 'y ~ x'
## `geom_smooth()` using formula 'y ~ x'
## Warning: Removed 57 rows containing non-finite values (stat_smooth).
## Warning: Removed 57 rows containing missing values (geom_point).
(plot_list[["BPRS"]][["probe"]][[5]] + plot_list[["BPRS"]][["probe"]][[6]]) +
plot_annotation(title = "BPRS vs Classification at probe Period")
## `geom_smooth()` using formula 'y ~ x'
## `geom_smooth()` using formula 'y ~ x'
## Warning: Removed 57 rows containing non-finite values (stat_smooth).
## Warning: Removed 57 rows containing missing values (geom_point).
# Follow-up correlation tests for the probe period (V11 column).
# High-load incorrect trials vs omnibus span.
cor.test(averages_from_template[["high_incorrect"]]$V11,temp_plot_data$omnibus_span_no_DFR_MRI)
##
## Pearson's product-moment correlation
##
## data: averages_from_template[["high_incorrect"]]$V11 and temp_plot_data$omnibus_span_no_DFR_MRI
## t = 1.5847, df = 167, p-value = 0.1149
## alternative hypothesis: true correlation is not equal to 0
## 95 percent confidence interval:
## -0.02979447 0.26775338
## sample estimates:
## cor
## 0.121713
# Correct-minus-incorrect difference at high load vs omnibus span.
cor.test(averages_from_template[["high_load_correct_diff"]]$V11,temp_plot_data$omnibus_span_no_DFR_MRI)
##
## Pearson's product-moment correlation
##
## data: averages_from_template[["high_load_correct_diff"]]$V11 and temp_plot_data$omnibus_span_no_DFR_MRI
## t = -1.6731, df = 167, p-value = 0.09618
## alternative hypothesis: true correlation is not equal to 0
## 95 percent confidence interval:
## -0.27404381 0.02301058
## sample estimates:
## cor
## -0.1283955
# High-load correct trials vs BPRS total score.
cor.test(averages_from_template[["high_correct"]]$V11,temp_plot_data$BPRS_TOT)
##
## Pearson's product-moment correlation
##
## data: averages_from_template[["high_correct"]]$V11 and temp_plot_data$BPRS_TOT
## t = 0.97609, df = 167, p-value = 0.3304
## alternative hypothesis: true correlation is not equal to 0
## 95 percent confidence interval:
## -0.07651243 0.22373374
## sample estimates:
## cor
## 0.07531755
# Outlier check: the same tests restricted to BPRS_TOT < 70
# (drops one subject; see surrounding text).
cor.test(averages_from_template[["high_correct"]]$V11[temp_plot_data$BPRS_TOT < 70],temp_plot_data$BPRS_TOT[temp_plot_data$BPRS_TOT < 70])
##
## Pearson's product-moment correlation
##
## data: averages_from_template[["high_correct"]]$V11[temp_plot_data$BPRS_TOT < 70] and temp_plot_data$BPRS_TOT[temp_plot_data$BPRS_TOT < 70]
## t = 0.98999, df = 166, p-value = 0.3236
## alternative hypothesis: true correlation is not equal to 0
## 95 percent confidence interval:
## -0.0756754 0.2254073
## sample estimates:
## cor
## 0.07661221
cor.test(averages_from_template[["high_incorrect"]]$V11[temp_plot_data$BPRS_TOT < 70],temp_plot_data$BPRS_TOT[temp_plot_data$BPRS_TOT < 70])
##
## Pearson's product-moment correlation
##
## data: averages_from_template[["high_incorrect"]]$V11[temp_plot_data$BPRS_TOT < 70] and temp_plot_data$BPRS_TOT[temp_plot_data$BPRS_TOT < 70]
## t = -3.5858, df = 166, p-value = 0.0004416
## alternative hypothesis: true correlation is not equal to 0
## 95 percent confidence interval:
## -0.4031638 -0.1216496
## sample estimates:
## cor
## -0.2681209
cor.test(averages_from_template[["high_load_correct_diff"]]$V11[temp_plot_data$BPRS_TOT < 70],temp_plot_data$BPRS_TOT[temp_plot_data$BPRS_TOT < 70])
##
## Pearson's product-moment correlation
##
## data: averages_from_template[["high_load_correct_diff"]]$V11[temp_plot_data$BPRS_TOT < 70] and temp_plot_data$BPRS_TOT[temp_plot_data$BPRS_TOT < 70]
## t = 3.3834, df = 166, p-value = 0.0008931
## alternative hypothesis: true correlation is not equal to 0
## 95 percent confidence interval:
## 0.1066856 0.3903892
## sample estimates:
## cor
## 0.2539926
# Correlate each capacity group's classification time course with behavior.
# For every trial type, build three 3 (groups) x 14 (TRs) frames holding the
# per-TR correlation of face-classification probability with omnibus span,
# high-load accuracy, and BPRS total, respectively.
behav_classification_corr_list <- list()
for (trial_type in seq_len(6)) {
  # Helper: empty 3 x 14 frame whose rows are the first three group names for
  # this trial type and whose columns are TRs 1-14.
  make_corr_frame <- function() {
    corr_frame <- data.frame(matrix(nrow = 3, ncol = 14))
    rownames(corr_frame) <- names(split_template[["all_data"]][[trial_type]])[1:3]
    colnames(corr_frame) <- seq_len(14)
    corr_frame
  }
  group_corrs_omnibus <- make_corr_frame()
  group_corrs_acc <- make_corr_frame()
  group_corrs_BPRS <- make_corr_frame()
  for (level in seq_len(3)) {
    # Sort subjects by PTID and subset the behavioral table to the same
    # participants so rows line up between the two data frames.
    temp_subj <- split_template[["all_data"]][[trial_type]][[level]][order(split_template[["all_data"]][[trial_type]][[level]]$PTID), ]
    temp_data <- data_to_plot[data_to_plot$PTID %in% split_template[["all_data"]][[trial_type]][[level]]$PTID, ]
    for (TR in seq_len(14)) {
      group_corrs_omnibus[level, TR] <- cor(temp_subj[, TR], temp_data$omnibus_span_no_DFR_MRI, use = "pairwise.complete.obs")
      group_corrs_acc[level, TR] <- cor(temp_subj[, TR], temp_data$XDFR_MRI_ACC_L3, use = "pairwise.complete.obs")
      # NOTE(review): this uses BPRS_TOT.x (a merge suffix) while the earlier
      # per-subject analyses use BPRS_TOT -- confirm both refer to the same
      # measure in data_to_plot.
      group_corrs_BPRS[level, TR] <- cor(temp_subj[, TR], temp_data$BPRS_TOT.x, use = "pairwise.complete.obs")
    }
  }
  # Tag rows with their group label once, after the fill loops (the original
  # re-assigned these columns on every iteration of the level loop; the final
  # values are identical).
  group_corrs_acc$level <- factor(rownames(group_corrs_acc))
  group_corrs_BPRS$level <- factor(rownames(group_corrs_BPRS))
  group_corrs_omnibus$level <- factor(rownames(group_corrs_omnibus))
  behav_classification_corr_list[["omnibus"]][[names(split_template[["all_data"]])[trial_type]]] <- group_corrs_omnibus
  behav_classification_corr_list[["BPRS"]][[names(split_template[["all_data"]])[trial_type]]] <- group_corrs_BPRS
  behav_classification_corr_list[["L3_Acc"]][[names(split_template[["all_data"]])[trial_type]]] <- group_corrs_acc
}
# Melt each correlation frame to long format and plot correlation-by-TR time
# courses, one line per capacity group, for each measure x trial type.
behav_classification_corr_melt <- list()
behav_split_plot_list <- list()
for (measure in seq.int(1,3)){
for (trial_type in seq.int(1,6)){
# The first assignment creates the named slot; the lines after it reach the
# same element by numeric index (equivalent because slots are created in
# loop order).
behav_classification_corr_melt[[names(behav_classification_corr_list)[measure]]][[names(behav_classification_corr_list[[measure]])[trial_type]]] <- melt(behav_classification_corr_list[[measure]][[trial_type]],id.vars="level")
# melt() returns TR labels as a factor; convert to numeric for a continuous
# x-axis.
behav_classification_corr_melt[[measure]][[trial_type]]$variable <- as.numeric(as.character(behav_classification_corr_melt[[measure]][[trial_type]]$variable))
# Fix group order high/med/low so legends are consistent across panels.
behav_classification_corr_melt[[measure]][[trial_type]]$level <- factor(behav_classification_corr_melt[[measure]][[trial_type]]$level, levels=c("high","med","low"))
behav_split_plot_list[[names(behav_classification_corr_melt)[measure]]][[names(behav_classification_corr_melt[[measure]])[trial_type]]] <-
ggplot(data = behav_classification_corr_melt[[measure]][[trial_type]],aes(x=variable,y=value))+
geom_line(aes(color=level))+
scale_x_continuous(breaks = c(1:14),labels=c(1:14))+
ggtitle(names(behav_classification_corr_list[[measure]])[trial_type])+
xlab("TR")+
ylab("Correlation")+
theme_classic()
}
}
# Composite figures of the per-group correlation time courses: omnibus span
# panels first, then high-load accuracy; legends collected across panels.
(behav_split_plot_list[["omnibus"]][[1]] + behav_split_plot_list[["omnibus"]][[2]]) /
(behav_split_plot_list[["omnibus"]][[3]] + behav_split_plot_list[["omnibus"]][[4]])+
plot_annotation("Omnibus Span Correlation with Face Classification Probability by Group")+
plot_layout(guides="collect")
## Warning: Removed 42 row(s) containing missing values (geom_path).
(behav_split_plot_list[["omnibus"]][[5]] + behav_split_plot_list[["omnibus"]][[6]]) +
plot_annotation("Omnibus Span Correlation with Face Classification Probability by Group")+
plot_layout(guides="collect")
(behav_split_plot_list[["L3_Acc"]][[1]] + behav_split_plot_list[["L3_Acc"]][[2]]) /
(behav_split_plot_list[["L3_Acc"]][[3]] + behav_split_plot_list[["L3_Acc"]][[4]])+
plot_annotation("High Load Acc Correlation with Face Classification Probability by Group")+
plot_layout(guides="collect")
## Warning: Removed 42 row(s) containing missing values (geom_path).
(behav_split_plot_list[["L3_Acc"]][[5]] + behav_split_plot_list[["L3_Acc"]][[6]]) +
plot_annotation("High Load Acc Correlation with Face Classification Probability by Group")+
plot_layout(guides="collect")
# BPRS-total panels of the per-group correlation time courses.
(behav_split_plot_list[["BPRS"]][[1]] + behav_split_plot_list[["BPRS"]][[2]]) /
(behav_split_plot_list[["BPRS"]][[3]] + behav_split_plot_list[["BPRS"]][[4]])+
plot_annotation("BPRS Total Correlation with Face Classification Probability by Group")+
plot_layout(guides="collect")
## Warning: Removed 42 row(s) containing missing values (geom_path).
# Remaining two trial types. Title fixed to match the panel above (the
# original read "BPRS Total with ...", dropping the word "Correlation").
(behav_split_plot_list[["BPRS"]][[5]] + behav_split_plot_list[["BPRS"]][[6]]) +
plot_annotation("BPRS Total Correlation with Face Classification Probability by Group")+
plot_layout(guides="collect")